├── .gitignore ├── core ├── core_test.go ├── chunk_subscriber.go └── core.go ├── LICENSE ├── README.md ├── host ├── routed │ └── routed_host.go └── basic │ ├── natmgr.go │ └── basic_host.go └── cmd ├── kad └── kad.go └── livepeerd └── livepeerd.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | 7 | # Test binary, build with `go test -c` 8 | *.test 9 | 10 | # Output of the go coverage tool, specifically when used with LiteIDE 11 | *.out 12 | 13 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 14 | .glide/ 15 | -------------------------------------------------------------------------------- /core/core_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "testing" 5 | 6 | crypto "github.com/libp2p/go-libp2p-crypto" 7 | peer "github.com/libp2p/go-libp2p-peer" 8 | ) 9 | 10 | func TestStreamIDs(t *testing.T) { 11 | sid := RandomStreamID() 12 | _, pub, _ := crypto.GenerateKeyPair(crypto.RSA, 2048) 13 | pid, _ := peer.IDFromPublicKey(pub) 14 | 15 | strmID := MakeStreamID(pid, sid) 16 | 17 | newPid, newSid := strmID.SplitComponents() 18 | 19 | if newPid != pid || newSid != sid { 20 | t.Errorf("streamID: %v", strmID) 21 | t.Errorf("pid: %v, newPID: %v", pid, newPid) 22 | t.Errorf("sid: %v, newSid: %v", sid, newSid) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /core/chunk_subscriber.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/livepeer/lpms/stream" 7 | ) 8 | 9 | type ChunkSubscriber struct { 10 | Buffer *stream.HLSBuffer 11 | Node *LivepeerNode 12 | } 13 | 14 | func NewChunkSubscriber(buf *stream.HLSBuffer, n *LivepeerNode) *ChunkSubscriber { 15 | return &ChunkSubscriber{Buffer: buf, Node: n} 16 | } 17 | 18 | func (cs *ChunkSubscriber) WriteSegment(seqNo uint64, name string, duration float64, s []byte) error { 19 | //Send out Have Messages 20 | strmID := strings.Split(name, "_")[0] 21 | // cs.Node.SendHaveChunk(name, strmID, peer.IDHexEncode(cs.Node.Identity)) 22 | cs.Node.SendHaveChunk(name, strmID) 23 | 24 | return cs.Buffer.WriteSegment(seqNo, name, duration, s) 25 | } 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Eric Tang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Livepeer Libp2p Spike 2 | 3 | [Livepeer](https://livepeer.org) is a decentralized live streaming broadcast platform. This 4 | repo is a proof-of-concept spike built on top of [libp2p](https://github.com/libp2p) by [Protocol Labs](https://protocol.ai/). 5 | 6 | This code allows you to stream a RTMP stream to many nodes on a peer-to-peer network. (The RTMP stream will automatically be converted to a HLS stream so it can be viewed over http). 7 | 8 | ## Installation 9 | Run `go get github.com/ericxtang/livepeer-libp2p-spike/cmd/livepeerd` 10 | 11 | ## Running the code 12 | 13 | #### Start Boot Node 14 | 15 | To start a boot node, create a data dir, and start the node by running: 16 | 17 | `livepeerd -datadir={datadir} -p={tcpport} -http={httpport} -rtmp={rtmpport} -logtostderr=true`. 18 | 19 | For example - `livepeerd -datadir=data1 -p=10000 -http=8080 -rtmp=1935 -logtostderr=true` 20 | 21 | #### Start Peer 22 | 23 | To start a peer, run 24 | 25 | `livepeerd -datadir={datadir} -p={tcpport} -http={httpport} -rtmp={rtmpport} -seedID={seedID} -seedAddr={seedAddr} -logtostderr=true`. 26 | 27 | The `seedID` and `seedAddr` can be found in the boot node console. For example: 28 | 29 | `livepeerd -datadir=data2 -p=10001 -http=8081 -rtmp=1936 -seedID=QmURyyVgQBd59rtLfNrdryZ6FAhYZvCUJTpePmXmbE4ghR -seedAddr=/ip4/127.0.0.1/tcp/10000 -logtostderr=true` 30 | 31 | #### Create Video Stream 32 | 33 | Now, start a rtmp stream by running: 34 | 35 | `ffmpeg -f avfoundation -framerate 30 -pixel_format uyvy422 -i "0:0" -vcodec libx264 -tune zerolatency -b 900k -x264-params keyint=60:min-keyint=60 -acodec aac -ac 1 -b:a 96k -f flv rtmp://localhost:1935/` 36 | 37 | This creates a RTMP stream and sends it to the boot node using [ffmpeg](http://ffmpeg.org/). You should be able to see a RTMP stream and a HLS stream created - something like: 38 | 39 | ``` 40 | I0606 17:46:25.650033 44931 listener.go:35] RTMP server got upstream: rtmp://localhost:1935/ 41 | I0606 17:46:25.650203 44931 livepeerd.go:369] Streaming hls stream: 12205a83cf18c934570c1308b88860a2631fbfab4246b5dea6d0f6f03eac1561232c4d118318be39b85585bf5318943cd91b 42 | I0606 17:46:25.650220 44931 listener.go:49] Got RTMP Stream: 12205a83cf18c934570c1308b88860a2631fbfab4246b5dea6d0f6f03eac1561232c35146d9a3ec1c8ace2162ab68cb599e6 43 | I0606 17:46:25.650227 44931 listener.go:53] Writing RTMP to stream 44 | I0606 17:46:25.653717 44931 video_segmenter.go:91] Ffmpeg path: 45 | I0606 17:46:25.689362 44931 player.go:34] LPMS got RTMP request @ rtmp://localhost:1935/stream/12205a83cf18c934570c1308b88860a2631fbfab4246b5dea6d0f6f03eac1561232c35146d9a3ec1c8ace2162ab68cb599e6 46 | I0606 17:46:25.689454 44931 livepeerd.go:383] Got req: %!(EXTRA string=/stream/12205a83cf18c934570c1308b88860a2631fbfab4246b5dea6d0f6f03eac1561232c35146d9a3ec1c8ace2162ab68cb599e6) 47 | ``` 48 | 49 | #### Subscribe to Stream 50 | 51 | Now you can subscribe to the stream with the new node. 
To do that, copy the HLS streamID from the output, and run 52 | 53 | `curl http://localhost:{peerHttpPort}/subscribe?streamID={hlsStreamID}` 54 | 55 | Make sure the peerHttpPort is the http port you used to start the second node. If the subscribe request is successful, you should get a `ok` as response. 56 | 57 | #### View Stream 58 | 59 | Now you can play the hls stream using ffplay - 60 | 61 | `ffplay http://localhost:{peerHttpPort}/stream/{hlsStreamID}` 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /host/routed/routed_host.go: -------------------------------------------------------------------------------- 1 | package routedhost 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | // host "gx/ipfs/QmcyNeWPsoFGxThGpV8JnJdfUNankKhWCTrbrcFRQda4xR/go-libp2p-host" 9 | 10 | // ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" 11 | 12 | // pstore "gx/ipfs/QmNUVzEjq3XWJ89hegahPvyfJbTXgTaom48pLb7YBD9gHQ/go-libp2p-peerstore" 13 | 14 | // peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" 15 | 16 | // inet "gx/ipfs/QmVtMT3fD7DzQNW7hdm6Xe6KPstzcggrhNpeVZ4422UpKK/go-libp2p-net" 17 | 18 | // msmux "gx/ipfs/QmTnsezaB1wWNRHeHnYrm8K4d5i9wtyj3GsqjC3Rt5b5v5/go-multistream" 19 | 20 | logging "github.com/ipfs/go-log" 21 | host "github.com/libp2p/go-libp2p-host" 22 | lgbl "github.com/libp2p/go-libp2p-loggables" 23 | inet "github.com/libp2p/go-libp2p-net" 24 | peer "github.com/libp2p/go-libp2p-peer" 25 | pstore "github.com/libp2p/go-libp2p-peerstore" 26 | protocol "github.com/libp2p/go-libp2p-protocol" 27 | ma "github.com/multiformats/go-multiaddr" 28 | msmux "github.com/multiformats/go-multistream" 29 | ) 30 | 31 | var log = logging.Logger("routedhost") 32 | 33 | // AddressTTL is the expiry time for our addresses. 34 | // We expire them quickly. 35 | const AddressTTL = time.Second * 10 36 | 37 | // RoutedHost is a p2p Host that includes a routing system. 38 | // This allows the Host to find the addresses for peers when 39 | // it does not have them. 40 | type RoutedHost struct { 41 | host host.Host // embedded other host. 42 | route Routing 43 | } 44 | 45 | type Routing interface { 46 | FindPeer(context.Context, peer.ID) (pstore.PeerInfo, error) 47 | } 48 | 49 | func Wrap(h host.Host, r Routing) *RoutedHost { 50 | return &RoutedHost{h, r} 51 | } 52 | 53 | // Connect ensures there is a connection between this host and the peer with 54 | // given peer.ID. See (host.Host).Connect for more information. 55 | // 56 | // RoutedHost's Connect differs in that if the host has no addresses for a 57 | // given peer, it will use its routing system to try to find some. 58 | func (rh *RoutedHost) Connect(ctx context.Context, pi pstore.PeerInfo) error { 59 | // first, check if we're already connected. 60 | if len(rh.Network().ConnsToPeer(pi.ID)) > 0 { 61 | return nil 62 | } 63 | 64 | // if we were given some addresses, keep + use them. 65 | if len(pi.Addrs) > 0 { 66 | rh.Peerstore().AddAddrs(pi.ID, pi.Addrs, pstore.TempAddrTTL) 67 | } 68 | 69 | // Check if we have some addresses in our recent memory. 70 | addrs := rh.Peerstore().Addrs(pi.ID) 71 | if len(addrs) < 1 { 72 | 73 | // no addrs? find some with the routing system. 
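// (in this spike the routing system is the Kademlia DHT wired up in
// core.NewNode, so this lookup is answered by the DHT.)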
74 | pi2, err := rh.route.FindPeer(ctx, pi.ID) 75 | if err != nil { 76 | return err // couldnt find any :( 77 | } 78 | if pi2.ID != pi.ID { 79 | err = fmt.Errorf("routing failure: provided addrs for different peer") 80 | logRoutingErrDifferentPeers(ctx, pi.ID, pi2.ID, err) 81 | return err 82 | } 83 | addrs = pi2.Addrs 84 | } 85 | 86 | // if we're here, we got some addrs. let's use our wrapped host to connect. 87 | pi.Addrs = addrs 88 | return rh.host.Connect(ctx, pi) 89 | } 90 | 91 | func logRoutingErrDifferentPeers(ctx context.Context, wanted, got peer.ID, err error) { 92 | lm := make(lgbl.DeferredMap) 93 | lm["error"] = err 94 | lm["wantedPeer"] = func() interface{} { return wanted.Pretty() } 95 | lm["gotPeer"] = func() interface{} { return got.Pretty() } 96 | log.Event(ctx, "routingError", lm) 97 | } 98 | 99 | func (rh *RoutedHost) ID() peer.ID { 100 | return rh.host.ID() 101 | } 102 | 103 | func (rh *RoutedHost) Peerstore() pstore.Peerstore { 104 | return rh.host.Peerstore() 105 | } 106 | 107 | func (rh *RoutedHost) Addrs() []ma.Multiaddr { 108 | return rh.host.Addrs() 109 | } 110 | 111 | func (rh *RoutedHost) Network() inet.Network { 112 | return rh.host.Network() 113 | } 114 | 115 | func (rh *RoutedHost) Mux() *msmux.MultistreamMuxer { 116 | return rh.host.Mux() 117 | } 118 | 119 | func (rh *RoutedHost) SetStreamHandler(pid protocol.ID, handler inet.StreamHandler) { 120 | rh.host.SetStreamHandler(pid, handler) 121 | } 122 | 123 | func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler inet.StreamHandler) { 124 | rh.host.SetStreamHandlerMatch(pid, m, handler) 125 | } 126 | 127 | func (rh *RoutedHost) RemoveStreamHandler(pid protocol.ID) { 128 | rh.host.RemoveStreamHandler(pid) 129 | } 130 | 131 | func (rh *RoutedHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (inet.Stream, error) { 132 | return rh.host.NewStream(ctx, p, pids...) 133 | } 134 | func (rh *RoutedHost) Close() error { 135 | // no need to close IpfsRouting. we dont own it. 
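// callers that construct the routing system (e.g. core.NewNode) own its lifecycle.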
136 | return rh.host.Close() 137 | } 138 | 139 | var _ (host.Host) = (*RoutedHost)(nil) 140 | -------------------------------------------------------------------------------- /cmd/kad/kad.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "flag" 7 | "io/ioutil" 8 | "path" 9 | "time" 10 | 11 | "github.com/ericxtang/livepeer-libp2p-spike/core" 12 | "github.com/golang/glog" 13 | crypto "github.com/libp2p/go-libp2p-crypto" 14 | net "github.com/libp2p/go-libp2p-net" 15 | peer "github.com/libp2p/go-libp2p-peer" 16 | ps "github.com/libp2p/go-libp2p-peerstore" 17 | protocol "github.com/libp2p/go-libp2p-protocol" 18 | ma "github.com/multiformats/go-multiaddr" 19 | ) 20 | 21 | var LivepeerProtocol = protocol.ID("/livepeer") 22 | var Peers []*core.LivepeerNode 23 | 24 | type KeyFile struct { 25 | Pub string 26 | Priv string 27 | } 28 | 29 | func getKeys(datadir string) (crypto.PrivKey, crypto.PubKey) { 30 | f, e := ioutil.ReadFile(path.Join(datadir, "keys.json")) 31 | var keyf KeyFile 32 | json.Unmarshal(f, &keyf) 33 | 34 | if e != nil || keyf.Priv == "" || keyf.Pub == "" { 35 | glog.Errorf("Cannot file keys in data dir, creating new keys") 36 | priv, pub, _ := crypto.GenerateKeyPair(crypto.RSA, 2048) 37 | 38 | privb, _ := priv.Bytes() 39 | pubb, _ := pub.Bytes() 40 | 41 | kf := KeyFile{Priv: crypto.ConfigEncodeKey(privb), Pub: crypto.ConfigEncodeKey(pubb)} 42 | kfb, _ := json.Marshal(kf) 43 | ioutil.WriteFile(path.Join(datadir, "keys.json"), kfb, 0644) 44 | 45 | return priv, pub 46 | } 47 | 48 | privb, _ := crypto.ConfigDecodeKey(keyf.Priv) 49 | pubb, _ := crypto.ConfigDecodeKey(keyf.Pub) 50 | priv, _ := crypto.UnmarshalPrivateKey(privb) 51 | pub, _ := crypto.UnmarshalPublicKey(pubb) 52 | return priv, pub 53 | } 54 | 55 | func handleProtocol(n *core.LivepeerNode, ws *core.WrappedStream) { 56 | var msg core.Message 57 | 58 | err := ws.Dec.Decode(&msg) 59 | if err != nil { 60 | glog.Errorf("Got error decoding msg: %v", err) 61 | return 62 | } 63 | //Protocol: 64 | // - Join Swarm (and form kademlia network) 65 | // - Get Peer Info??? 
66 | // - Have(strmID, chunkID) 67 | // - Request Video Chunk(strmID, chunkID) (ChunkDATA) 68 | // - Transcode(strmID, config) (newStrmID) 69 | switch msg.Msg { 70 | case core.JoinMsgID: 71 | handleJoinSwarm(n, msg) 72 | 73 | case core.JoinAckID: 74 | glog.Infof("Recieved Ack") 75 | 76 | default: 77 | } 78 | 79 | } 80 | 81 | func handleJoinSwarm(n *core.LivepeerNode, msg core.Message) { 82 | //Join the swarm 83 | joinMsg, ok := msg.Data.(map[string]interface{}) 84 | if !ok { 85 | glog.Errorf("Error converting JoinMsg: %v", msg.Data) 86 | } 87 | pid, _ := peer.IDB58Decode(joinMsg["ID"].(string)) 88 | addr, _ := ma.NewMultiaddr(joinMsg["Addr"].(string)) 89 | n.Peerstore.AddAddr(pid, addr, ps.PermanentAddrTTL) 90 | n.PeerHost.Connect(context.Background(), ps.PeerInfo{ID: pid}) 91 | 92 | //Print current peers 93 | glog.Infof("%v Got join request from %v", n.Identity.Pretty(), pid.Pretty()) 94 | glog.Infof("Current peers:\n") 95 | for _, p := range n.Peerstore.Peers() { 96 | glog.Infof("%v :: %v", p.Pretty(), n.Peerstore.Addrs(p)) 97 | } 98 | glog.Infof("\n\n") 99 | 100 | //Send Ack 101 | n.SendJoinAck(pid) 102 | } 103 | 104 | func CreateNode(datadir string, port int, seedPeerID string, seedAddr string) { 105 | var priv crypto.PrivKey 106 | var pub crypto.PubKey 107 | if datadir == "" { 108 | priv, pub, _ = crypto.GenerateKeyPair(crypto.RSA, 2048) 109 | } else { 110 | priv, pub = getKeys(datadir) 111 | } 112 | 113 | n, err := core.NewNode(port, priv, pub) 114 | if err != nil { 115 | glog.Errorf("Cannot create node: %v", err) 116 | return 117 | } 118 | 119 | Peers = append(Peers, n) 120 | n.PeerHost.SetStreamHandler(LivepeerProtocol, func(s net.Stream) { 121 | wrappedStream := core.WrapStream(s) 122 | defer s.Close() 123 | handleProtocol(n, wrappedStream) 124 | }) 125 | 126 | // glog.Infof("Node ID: %v", n.PeerHost.ID()) 127 | // glog.Infof("Identity: %s", n.Identity.Pretty()) 128 | // glog.Infof("peer.ID: %s", peer.ID("0x1442ef0")) 129 | // glog.Infof("Addrs: %v", n.Peerstore.Addrs()) 130 | 131 | if seedPeerID != "" && seedAddr != "" { 132 | seedID, _ := peer.IDB58Decode(seedPeerID) 133 | addr, err := ma.NewMultiaddr(seedAddr) 134 | if err != nil { 135 | glog.Fatalf("Cannot join swarm: %v", err) 136 | } 137 | n.Peerstore.AddAddr(seedID, addr, ps.PermanentAddrTTL) 138 | n.PeerHost.Connect(context.Background(), ps.PeerInfo{ID: seedID}) 139 | 140 | n.SendJoin(seedID) 141 | } 142 | glog.Infof("%v listening for connections on %v", n.Identity.Pretty(), n.PeerHost.Addrs()) 143 | select {} // hang forever 144 | } 145 | 146 | func ConnectNode(n *core.LivepeerNode, seedPeerID, seedAddr string) { 147 | //Try and connect to the seed node 148 | } 149 | 150 | func main() { 151 | // glog.Infof("Starting node...") 152 | port := flag.Int("p", 0, "port") 153 | flag.Parse() 154 | 155 | if *port == 0 { 156 | glog.Fatalf("Please provide port") 157 | } 158 | 159 | //Initialize the Peer slice 160 | Peers = make([]*core.LivepeerNode, 0, 100) 161 | 162 | //Create Seed Node 163 | go CreateNode("", *port, "", "") 164 | 165 | time.Sleep(1 * time.Second) 166 | //Layer 1 peers, connect to the seed node 167 | newSeedPid := Peers[0].Identity.Pretty() 168 | newSeedAddr := Peers[0].PeerHost.Addrs()[0].String() 169 | glog.Infof("\n\n\n\n\nCreating 10 peers for seed: %v @ %v", newSeedPid, newSeedAddr) 170 | time.Sleep(1 * time.Second) 171 | for i := 0; i < 10; i++ { 172 | go CreateNode("", 10001+i, newSeedPid, newSeedAddr) 173 | time.Sleep(500 * time.Millisecond) 174 | } 175 | 176 | //Layer 2 peers, connect to the new seed node 
(the last node created in the previous loop) 177 | time.Sleep(5 * time.Second) 178 | newSeedPid = peer.IDB58Encode(Peers[5].Identity) 179 | newSeedAddr = Peers[5].PeerHost.Addrs()[0].String() 180 | glog.Infof("\n\n\n\n\nCreating 10 peers for seed: %v @ %v", newSeedPid, newSeedAddr) 181 | time.Sleep(1 * time.Second) 182 | for i := 0; i < 10; i++ { 183 | // glog.Infof("pid: %v, addr: %v", pid, addr.String()) 184 | go CreateNode("", 10011+i, newSeedPid, newSeedAddr) 185 | time.Sleep(500 * time.Millisecond) 186 | } 187 | 188 | //Search for a peer 189 | time.Sleep(5 * time.Second) 190 | glog.Info("\n\n\n\n\nDo a peer search (between nodes NOT directly connected with each other)") 191 | time.Sleep(1 * time.Second) 192 | info, err := Peers[15].Routing.FindPeer(context.Background(), Peers[1].Identity) 193 | if err != nil { 194 | glog.Errorf("Error finding peer: %v", err) 195 | } 196 | glog.Infof("%v is looking for %v, found %v(%v)\n\nSuccess!!!", Peers[15].Identity.Pretty(), Peers[1].Identity.Pretty(), info.ID.Pretty(), info) 197 | 198 | select {} // hang forever 199 | } 200 | -------------------------------------------------------------------------------- /host/basic/natmgr.go: -------------------------------------------------------------------------------- 1 | package basichost 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | inat "github.com/libp2p/go-libp2p-nat" 8 | 9 | goprocess "github.com/jbenet/goprocess" 10 | lgbl "github.com/libp2p/go-libp2p-loggables" 11 | inet "github.com/libp2p/go-libp2p-net" 12 | ma "github.com/multiformats/go-multiaddr" 13 | ) 14 | 15 | // natManager takes care of adding + removing port mappings to the nat. 16 | // Initialized with the host if it has a NATPortMap option enabled. 17 | // natManager receives signals from the network, and check on nat mappings: 18 | // * natManager listens to the network and adds or closes port mappings 19 | // as the network signals Listen() or ListenClose(). 20 | // * closing the natManager closes the nat and its mappings. 21 | type natManager struct { 22 | host *BasicHost 23 | natmu sync.RWMutex // guards nat (ready could obviate this mutex, but safety first.) 24 | nat *inat.NAT 25 | 26 | ready chan struct{} // closed once the nat is ready to process port mappings 27 | proc goprocess.Process // natManager has a process + children. can be closed. 28 | } 29 | 30 | func newNatManager(host *BasicHost) *natManager { 31 | nmgr := &natManager{ 32 | host: host, 33 | ready: make(chan struct{}), 34 | proc: goprocess.WithParent(host.proc), 35 | } 36 | 37 | // teardown 38 | nmgr.proc = goprocess.WithTeardown(func() error { 39 | // on closing, unregister from network notifications. 40 | host.Network().StopNotify((*nmgrNetNotifiee)(nmgr)) 41 | return nil 42 | }) 43 | 44 | // host is our parent. close when host closes. 45 | host.proc.AddChild(nmgr.proc) 46 | 47 | // discover the nat. 48 | nmgr.discoverNAT() 49 | return nmgr 50 | } 51 | 52 | // Close closes the natManager, closing the underlying nat 53 | // and unregistering from network events. 54 | func (nmgr *natManager) Close() error { 55 | return nmgr.proc.Close() 56 | } 57 | 58 | // Ready returns a channel which will be closed when the NAT has been found 59 | // and is ready to be used, or the search process is done. 60 | func (nmgr *natManager) Ready() <-chan struct{} { 61 | return nmgr.ready 62 | } 63 | 64 | func (nmgr *natManager) discoverNAT() { 65 | 66 | nmgr.proc.Go(func(worker goprocess.Process) { 67 | // inat.DiscoverNAT blocks until the nat is found or a timeout 68 | // is reached. 
we unfortunately cannot specify timeouts-- the 69 | // library we're using just blocks. 70 | // 71 | // Note: on early shutdown, there may be a case where we're trying 72 | // to close before DiscoverNAT() returns. Since we cant cancel it 73 | // (library) we can choose to (1) drop the result and return early, 74 | // or (2) wait until it times out to exit. For now we choose (2), 75 | // to avoid leaking resources in a non-obvious way. the only case 76 | // this affects is when the daemon is being started up and _immediately_ 77 | // asked to close. other services are also starting up, so ok to wait. 78 | discoverdone := make(chan struct{}) 79 | var nat *inat.NAT 80 | go func() { 81 | defer close(discoverdone) 82 | nat = inat.DiscoverNAT() 83 | }() 84 | 85 | // by this point -- after finding the NAT -- we may have already 86 | // be closing. if so, just exit. 87 | select { 88 | case <-worker.Closing(): 89 | return 90 | case <-discoverdone: 91 | if nat == nil { // no nat, or failed to get it. 92 | return 93 | } 94 | } 95 | 96 | // wire up the nat to close when nmgr closes. 97 | // nmgr.proc is our parent, and waiting for us. 98 | nmgr.proc.AddChild(nat.Process()) 99 | 100 | // set the nat. 101 | nmgr.natmu.Lock() 102 | nmgr.nat = nat 103 | nmgr.natmu.Unlock() 104 | 105 | // signal that we're ready to process nat mappings: 106 | close(nmgr.ready) 107 | 108 | // sign natManager up for network notifications 109 | // we need to sign up here to avoid missing some notifs 110 | // before the NAT has been found. 111 | nmgr.host.Network().Notify((*nmgrNetNotifiee)(nmgr)) 112 | 113 | // if any interfaces were brought up while we were setting up 114 | // the nat, now is the time to setup port mappings for them. 115 | // we release ready, then grab them to avoid losing any. adding 116 | // a port mapping is idempotent, so its ok to add the same twice. 117 | addrs := nmgr.host.Network().ListenAddresses() 118 | for _, addr := range addrs { 119 | // we do it async because it's slow and we may want to close beforehand 120 | go addPortMapping(nmgr, addr) 121 | } 122 | }) 123 | } 124 | 125 | // NAT returns the natManager's nat object. this may be nil, if 126 | // (a) the search process is still ongoing, or (b) the search process 127 | // found no nat. Clients must check whether the return value is nil. 128 | func (nmgr *natManager) NAT() *inat.NAT { 129 | nmgr.natmu.Lock() 130 | defer nmgr.natmu.Unlock() 131 | return nmgr.nat 132 | } 133 | 134 | func addPortMapping(nmgr *natManager, intaddr ma.Multiaddr) { 135 | nat := nmgr.NAT() 136 | if nat == nil { 137 | panic("natManager addPortMapping called without a nat.") 138 | } 139 | 140 | // first, check if the port mapping already exists. 141 | for _, mapping := range nat.Mappings() { 142 | if mapping.InternalAddr().Equal(intaddr) { 143 | return // it exists! return. 144 | } 145 | } 146 | 147 | ctx := context.TODO() 148 | lm := make(lgbl.DeferredMap) 149 | lm["internalAddr"] = func() interface{} { return intaddr.String() } 150 | 151 | defer log.EventBegin(ctx, "natMgrAddPortMappingWait", lm).Done() 152 | 153 | select { 154 | case <-nmgr.proc.Closing(): 155 | lm["outcome"] = "cancelled" 156 | return // no use. 157 | case <-nmgr.ready: // wait until it's ready. 
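// ready is only closed after nmgr.nat has been set (see discoverNAT),
// so it is safe to create the mapping below.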
158 | } 159 | 160 | // actually start the port map (sub-event because waiting may take a while) 161 | defer log.EventBegin(ctx, "natMgrAddPortMapping", lm).Done() 162 | 163 | // get the nat 164 | m, err := nat.NewMapping(intaddr) 165 | if err != nil { 166 | lm["outcome"] = "failure" 167 | lm["error"] = err 168 | return 169 | } 170 | 171 | extaddr, err := m.ExternalAddr() 172 | if err != nil { 173 | lm["outcome"] = "failure" 174 | lm["error"] = err 175 | return 176 | } 177 | 178 | lm["outcome"] = "success" 179 | lm["externalAddr"] = func() interface{} { return extaddr.String() } 180 | log.Infof("established nat port mapping: %s <--> %s", intaddr, extaddr) 181 | } 182 | 183 | func rmPortMapping(nmgr *natManager, intaddr ma.Multiaddr) { 184 | nat := nmgr.NAT() 185 | if nat == nil { 186 | panic("natManager rmPortMapping called without a nat.") 187 | } 188 | 189 | // list the port mappings (it may be gone on it's own, so we need to 190 | // check this list, and not store it ourselves behind the scenes) 191 | 192 | // close mappings for this internal address. 193 | for _, mapping := range nat.Mappings() { 194 | if mapping.InternalAddr().Equal(intaddr) { 195 | mapping.Close() 196 | } 197 | } 198 | } 199 | 200 | // nmgrNetNotifiee implements the network notification listening part 201 | // of the natManager. this is merely listening to Listen() and ListenClose() 202 | // events. 203 | type nmgrNetNotifiee natManager 204 | 205 | func (nn *nmgrNetNotifiee) natManager() *natManager { 206 | return (*natManager)(nn) 207 | } 208 | 209 | func (nn *nmgrNetNotifiee) Listen(n inet.Network, addr ma.Multiaddr) { 210 | if nn.natManager().NAT() == nil { 211 | return // not ready or doesnt exist. 212 | } 213 | 214 | addPortMapping(nn.natManager(), addr) 215 | } 216 | 217 | func (nn *nmgrNetNotifiee) ListenClose(n inet.Network, addr ma.Multiaddr) { 218 | if nn.natManager().NAT() == nil { 219 | return // not ready or doesnt exist. 220 | } 221 | 222 | rmPortMapping(nn.natManager(), addr) 223 | } 224 | 225 | func (nn *nmgrNetNotifiee) Connected(inet.Network, inet.Conn) {} 226 | func (nn *nmgrNetNotifiee) Disconnected(inet.Network, inet.Conn) {} 227 | func (nn *nmgrNetNotifiee) OpenedStream(inet.Network, inet.Stream) {} 228 | func (nn *nmgrNetNotifiee) ClosedStream(inet.Network, inet.Stream) {} 229 | -------------------------------------------------------------------------------- /host/basic/basic_host.go: -------------------------------------------------------------------------------- 1 | package basichost 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "time" 7 | 8 | identify "github.com/libp2p/go-libp2p/p2p/protocol/identify" 9 | 10 | // ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" 11 | 12 | logging "github.com/ipfs/go-log" 13 | goprocess "github.com/jbenet/goprocess" 14 | metrics "github.com/libp2p/go-libp2p-metrics" 15 | mstream "github.com/libp2p/go-libp2p-metrics/stream" 16 | inet "github.com/libp2p/go-libp2p-net" 17 | peer "github.com/libp2p/go-libp2p-peer" 18 | pstore "github.com/libp2p/go-libp2p-peerstore" 19 | protocol "github.com/libp2p/go-libp2p-protocol" 20 | ma "github.com/multiformats/go-multiaddr" 21 | 22 | msmux "github.com/multiformats/go-multistream" 23 | ) 24 | 25 | var log = logging.Logger("basichost") 26 | 27 | var NegotiateTimeout = time.Second * 60 28 | 29 | // Option is a type used to pass in options to the host. 30 | type Option int 31 | 32 | const ( 33 | // NATPortMap makes the host attempt to open port-mapping in NAT devices 34 | // for all its listeners. 
Pass in this option in the constructor to 35 | // asynchronously a) find a gateway, b) open port mappings, c) republish 36 | // port mappings periodically. The NATed addresses are included in the 37 | // Host's Addrs() list. 38 | NATPortMap Option = iota 39 | ) 40 | 41 | // BasicHost is the basic implementation of the host.Host interface. This 42 | // particular host implementation: 43 | // * uses a protocol muxer to mux per-protocol streams 44 | // * uses an identity service to send + receive node information 45 | // * uses a nat service to establish NAT port mappings 46 | type BasicHost struct { 47 | baseNetwork inet.Network 48 | strmNetworks map[string]inet.Network 49 | mux *msmux.MultistreamMuxer 50 | ids *identify.IDService 51 | natmgr *natManager 52 | 53 | NegotiateTimeout time.Duration 54 | 55 | proc goprocess.Process 56 | 57 | bwc metrics.Reporter 58 | } 59 | 60 | // New constructs and sets up a new *BasicHost with given Network 61 | func New(net inet.Network, opts ...interface{}) *BasicHost { 62 | h := &BasicHost{ 63 | baseNetwork: net, 64 | mux: msmux.NewMultistreamMuxer(), 65 | NegotiateTimeout: NegotiateTimeout, 66 | } 67 | 68 | h.proc = goprocess.WithTeardown(func() error { 69 | if h.natmgr != nil { 70 | h.natmgr.Close() 71 | } 72 | 73 | return h.Network().Close() 74 | }) 75 | 76 | // setup host services 77 | h.ids = identify.NewIDService(h) 78 | 79 | for _, o := range opts { 80 | switch o := o.(type) { 81 | case Option: 82 | switch o { 83 | case NATPortMap: 84 | h.natmgr = newNatManager(h) 85 | } 86 | case metrics.Reporter: 87 | h.bwc = o 88 | } 89 | } 90 | 91 | h.ids.Reporter = h.bwc 92 | 93 | net.SetConnHandler(h.newConnHandler) 94 | net.SetStreamHandler(h.newStreamHandler) 95 | 96 | return h 97 | } 98 | 99 | // newConnHandler is the remote-opened conn handler for inet.Network 100 | func (h *BasicHost) newConnHandler(c inet.Conn) { 101 | // Clear protocols on connecting to new peer to avoid issues caused 102 | // by misremembering protocols between reconnects 103 | h.Peerstore().SetProtocols(c.RemotePeer()) 104 | h.ids.IdentifyConn(c) 105 | } 106 | 107 | // newStreamHandler is the remote-opened stream handler for inet.Network 108 | // TODO: this feels a bit wonky 109 | func (h *BasicHost) newStreamHandler(s inet.Stream) { 110 | before := time.Now() 111 | 112 | if h.NegotiateTimeout != 0 { 113 | if err := s.SetDeadline(time.Now().Add(h.NegotiateTimeout)); err != nil { 114 | log.Error("setting stream deadline: ", err) 115 | s.Close() 116 | return 117 | } 118 | } 119 | 120 | lzc, protoID, handle, err := h.Mux().NegotiateLazy(s) 121 | took := time.Now().Sub(before) 122 | if err != nil { 123 | if err == io.EOF { 124 | logf := log.Debugf 125 | if took > time.Second*10 { 126 | logf = log.Warningf 127 | } 128 | logf("protocol EOF: %s (took %s)", s.Conn().RemotePeer(), took) 129 | } else { 130 | log.Warning("protocol mux failed: %s (took %s)", err, took) 131 | } 132 | s.Close() 133 | return 134 | } 135 | 136 | s = &streamWrapper{ 137 | Stream: s, 138 | rw: lzc, 139 | } 140 | 141 | if h.NegotiateTimeout != 0 { 142 | if err := s.SetDeadline(time.Time{}); err != nil { 143 | log.Error("resetting stream deadline: ", err) 144 | s.Close() 145 | return 146 | } 147 | } 148 | 149 | s.SetProtocol(protocol.ID(protoID)) 150 | 151 | if h.bwc != nil { 152 | s = mstream.WrapStream(s, h.bwc) 153 | } 154 | log.Debugf("protocol negotiation took %s", took) 155 | 156 | go handle(protoID, s) 157 | } 158 | 159 | func (h *BasicHost) AddVideoStreamNetwork(strmID string, ntwk inet.Network) { 160 | 
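// NOTE: strmNetworks is never initialized in New above, so this assignment
// panics on a nil map unless the map is created first (e.g. with make in New).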
h.strmNetworks[strmID] = ntwk 161 | } 162 | 163 | func (h *BasicHost) RemoveVideoStreamNetwork(strmID string) { 164 | delete(h.strmNetworks, strmID) 165 | } 166 | 167 | // ID returns the (local) peer.ID associated with this Host 168 | func (h *BasicHost) ID() peer.ID { 169 | return h.Network().LocalPeer() 170 | } 171 | 172 | // Peerstore returns the Host's repository of Peer Addresses and Keys. 173 | func (h *BasicHost) Peerstore() pstore.Peerstore { 174 | return h.Network().Peerstore() 175 | } 176 | 177 | // Network returns the Network interface of the Host 178 | func (h *BasicHost) Network() inet.Network { 179 | return h.baseNetwork 180 | } 181 | 182 | // Mux returns the Mux multiplexing incoming streams to protocol handlers 183 | func (h *BasicHost) Mux() *msmux.MultistreamMuxer { 184 | return h.mux 185 | } 186 | 187 | // IDService returns 188 | func (h *BasicHost) IDService() *identify.IDService { 189 | return h.ids 190 | } 191 | 192 | // SetStreamHandler sets the protocol handler on the Host's Mux. 193 | // This is equivalent to: 194 | // host.Mux().SetHandler(proto, handler) 195 | // (Threadsafe) 196 | func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler inet.StreamHandler) { 197 | h.Mux().AddHandler(string(pid), func(p string, rwc io.ReadWriteCloser) error { 198 | is := rwc.(inet.Stream) 199 | is.SetProtocol(protocol.ID(p)) 200 | handler(is) 201 | return nil 202 | }) 203 | } 204 | 205 | // SetStreamHandlerMatch sets the protocol handler on the Host's Mux 206 | // using a matching function to do protocol comparisons 207 | func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler inet.StreamHandler) { 208 | h.Mux().AddHandlerWithFunc(string(pid), m, func(p string, rwc io.ReadWriteCloser) error { 209 | is := rwc.(inet.Stream) 210 | is.SetProtocol(protocol.ID(p)) 211 | handler(is) 212 | return nil 213 | }) 214 | } 215 | 216 | // RemoveStreamHandler returns .. 217 | func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) { 218 | h.Mux().RemoveHandler(string(pid)) 219 | } 220 | 221 | // NewStream opens a new stream to given peer p, and writes a p2p/protocol 222 | // header with given protocol.ID. If there is no connection to p, attempts 223 | // to create one. If ProtocolID is "", writes no header. 
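// Protocols the peer is already known to support (via the peerstore) are
// preferred; otherwise multistream-select negotiates one of pids on the
// newly opened stream.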
224 | // (Threadsafe) 225 | func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (inet.Stream, error) { 226 | pref, err := h.preferredProtocol(p, pids) 227 | if err != nil { 228 | return nil, err 229 | } 230 | 231 | if pref != "" { 232 | return h.newStream(ctx, p, pref) 233 | } 234 | 235 | var protoStrs []string 236 | for _, pid := range pids { 237 | protoStrs = append(protoStrs, string(pid)) 238 | } 239 | 240 | s, err := h.Network().NewStream(ctx, p) 241 | if err != nil { 242 | return nil, err 243 | } 244 | 245 | selected, err := msmux.SelectOneOf(protoStrs, s) 246 | if err != nil { 247 | s.Close() 248 | return nil, err 249 | } 250 | selpid := protocol.ID(selected) 251 | s.SetProtocol(selpid) 252 | h.Peerstore().AddProtocols(p, selected) 253 | 254 | if h.bwc != nil { 255 | s = mstream.WrapStream(s, h.bwc) 256 | } 257 | 258 | return s, nil 259 | } 260 | 261 | func pidsToStrings(pids []protocol.ID) []string { 262 | out := make([]string, len(pids)) 263 | for i, p := range pids { 264 | out[i] = string(p) 265 | } 266 | return out 267 | } 268 | 269 | func (h *BasicHost) preferredProtocol(p peer.ID, pids []protocol.ID) (protocol.ID, error) { 270 | pidstrs := pidsToStrings(pids) 271 | supported, err := h.Peerstore().SupportsProtocols(p, pidstrs...) 272 | if err != nil { 273 | return "", err 274 | } 275 | 276 | var out protocol.ID 277 | if len(supported) > 0 { 278 | out = protocol.ID(supported[0]) 279 | } 280 | return out, nil 281 | } 282 | 283 | func (h *BasicHost) newStream(ctx context.Context, p peer.ID, pid protocol.ID) (inet.Stream, error) { 284 | s, err := h.Network().NewStream(ctx, p) 285 | if err != nil { 286 | return nil, err 287 | } 288 | 289 | s.SetProtocol(pid) 290 | 291 | if h.bwc != nil { 292 | s = mstream.WrapStream(s, h.bwc) 293 | } 294 | 295 | lzcon := msmux.NewMSSelect(s, string(pid)) 296 | return &streamWrapper{ 297 | Stream: s, 298 | rw: lzcon, 299 | }, nil 300 | } 301 | 302 | // Connect ensures there is a connection between this host and the peer with 303 | // given peer.ID. Connect will absorb the addresses in pi into its internal 304 | // peerstore. If there is not an active connection, Connect will issue a 305 | // h.Network.Dial, and block until a connection is open, or an error is 306 | // returned. 307 | func (h *BasicHost) Connect(ctx context.Context, pi pstore.PeerInfo) error { 308 | 309 | // absorb addresses into peerstore 310 | h.Peerstore().AddAddrs(pi.ID, pi.Addrs, pstore.TempAddrTTL) 311 | 312 | cs := h.Network().ConnsToPeer(pi.ID) 313 | if len(cs) > 0 { 314 | return nil 315 | } 316 | 317 | return h.dialPeer(ctx, pi.ID) 318 | } 319 | 320 | // dialPeer opens a connection to peer, and makes sure to identify 321 | // the connection once it has been opened. 322 | func (h *BasicHost) dialPeer(ctx context.Context, p peer.ID) error { 323 | log.Debugf("host %s dialing %s", h.ID, p) 324 | c, err := h.Network().DialPeer(ctx, p) 325 | if err != nil { 326 | return err 327 | } 328 | 329 | // Clear protocols on connecting to new peer to avoid issues caused 330 | // by misremembering protocols between reconnects 331 | h.Peerstore().SetProtocols(p) 332 | 333 | // identify the connection before returning. 
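// IdentifyConn runs in its own goroutine so the select below can keep
// honoring ctx cancellation while identification is in flight.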
334 | done := make(chan struct{}) 335 | go func() { 336 | h.ids.IdentifyConn(c) 337 | close(done) 338 | }() 339 | 340 | // respect don contexteone 341 | select { 342 | case <-done: 343 | case <-ctx.Done(): 344 | return ctx.Err() 345 | } 346 | 347 | log.Debugf("host %s finished dialing %s", h.ID(), p) 348 | return nil 349 | } 350 | 351 | // Addrs returns all the addresses of BasicHost at this moment in time. 352 | // It's ok to not include addresses if they're not available to be used now. 353 | func (h *BasicHost) Addrs() []ma.Multiaddr { 354 | addrs, err := h.Network().InterfaceListenAddresses() 355 | if err != nil { 356 | log.Debug("error retrieving network interface addrs") 357 | } 358 | 359 | if h.ids != nil { // add external observed addresses 360 | addrs = append(addrs, h.ids.OwnObservedAddrs()...) 361 | } 362 | 363 | if h.natmgr != nil { // natmgr is nil if we do not use nat option. 364 | nat := h.natmgr.NAT() 365 | if nat != nil { // nat is nil if not ready, or no nat is available. 366 | addrs = append(addrs, nat.ExternalAddrs()...) 367 | } 368 | } 369 | 370 | return addrs 371 | } 372 | 373 | // Close shuts down the Host's services (network, etc). 374 | func (h *BasicHost) Close() error { 375 | return h.proc.Close() 376 | } 377 | 378 | // GetBandwidthReporter exposes the Host's bandiwth metrics reporter 379 | func (h *BasicHost) GetBandwidthReporter() metrics.Reporter { 380 | return h.bwc 381 | } 382 | 383 | type streamWrapper struct { 384 | inet.Stream 385 | rw io.ReadWriter 386 | } 387 | 388 | func (s *streamWrapper) Read(b []byte) (int, error) { 389 | return s.rw.Read(b) 390 | } 391 | 392 | func (s *streamWrapper) Write(b []byte) (int, error) { 393 | return s.rw.Write(b) 394 | } 395 | -------------------------------------------------------------------------------- /core/core.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "log" 8 | "math/rand" 9 | "time" 10 | 11 | // dht "gx/ipfs/QmQcRLisUbREko56ThfgzdBorMGNfNjgqzvwuPPr1jFw6A/go-libp2p-kad-dht" 12 | // routing "gx/ipfs/QmafuecpeZp3k3sHJ5mUARHd4795revuadECQMkmHB8LfW/go-libp2p-routing" 13 | // p2phost "gx/ipfs/QmcyNeWPsoFGxThGpV8JnJdfUNankKhWCTrbrcFRQda4xR/go-libp2p-host" 14 | // ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" 15 | 16 | // ds "github.com/ipfs/go-datastore" 17 | 18 | bhost "github.com/ericxtang/livepeer-libp2p-spike/host/basic" 19 | rhost "github.com/ericxtang/livepeer-libp2p-spike/host/routed" 20 | "github.com/golang/glog" 21 | ds "github.com/ipfs/go-datastore" 22 | crypto "github.com/libp2p/go-libp2p-crypto" 23 | p2phost "github.com/libp2p/go-libp2p-host" 24 | dht "github.com/libp2p/go-libp2p-kad-dht" 25 | net "github.com/libp2p/go-libp2p-net" 26 | peer "github.com/libp2p/go-libp2p-peer" 27 | ps "github.com/libp2p/go-libp2p-peerstore" 28 | protocol "github.com/libp2p/go-libp2p-protocol" 29 | routing "github.com/libp2p/go-libp2p-routing" 30 | swarm "github.com/libp2p/go-libp2p-swarm" 31 | ma "github.com/multiformats/go-multiaddr" 32 | multicodec "github.com/multiformats/go-multicodec" 33 | mcjson "github.com/multiformats/go-multicodec/json" 34 | 35 | lpmsStream "github.com/livepeer/lpms/stream" 36 | ) 37 | 38 | // var IpnsValidatorTag = "ipns" 39 | 40 | // var P_LIVEPEER = 422 41 | var LivepeerProtocol = protocol.ID("/livepeer") 42 | 43 | const ( 44 | JoinMsgID = iota 45 | JoinAckID 46 | SubReqID 47 | ChunkHaveID 48 | ChunkReqID 49 | ChunkReqAckID 50 | TranscodeMsgID 51 | 
HaveMsgID 52 | ) 53 | 54 | type Message struct { 55 | Msg int 56 | Data interface{} 57 | // HangUp bool 58 | } 59 | 60 | type JoinMsg struct { 61 | ID string 62 | Addr string 63 | //Shouldn't need any data... 64 | } 65 | 66 | type ChunkHaveMsg struct { 67 | ChunkName string 68 | StreamID string 69 | PeerID string 70 | Addr string 71 | } 72 | 73 | type ChunkReqMsg struct { 74 | ChunkName string 75 | StreamID string 76 | Peer string 77 | } 78 | 79 | type ChunkReqAckMsg struct { 80 | ChunkName string 81 | StreamID string 82 | Data string //hex encoded 83 | } 84 | 85 | type SubReqMsg struct { 86 | ID string 87 | Addr string 88 | SID string 89 | } 90 | 91 | type Transcode struct { 92 | StrmID string 93 | //Could be a transcode config, but let's assume it's just the same config. 94 | } 95 | 96 | type WrappedStream struct { 97 | Stream net.Stream 98 | Enc multicodec.Encoder 99 | Dec multicodec.Decoder 100 | Writer *bufio.Writer 101 | Reader *bufio.Reader 102 | } 103 | 104 | func WrapStream(s net.Stream) *WrappedStream { 105 | reader := bufio.NewReader(s) 106 | writer := bufio.NewWriter(s) 107 | // This is where we pick our specific multicodec. In order to change the 108 | // codec, we only need to change this place. 109 | // See https://godoc.org/github.com/multiformats/go-multicodec/json 110 | dec := mcjson.Multicodec(false).Decoder(reader) 111 | enc := mcjson.Multicodec(false).Encoder(writer) 112 | return &WrappedStream{ 113 | Stream: s, 114 | Reader: reader, 115 | Writer: writer, 116 | Enc: enc, 117 | Dec: dec, 118 | } 119 | } 120 | 121 | type LivepeerNode struct { 122 | // Self 123 | Identity peer.ID // the local node's identity 124 | Peerstore ps.Peerstore // storage for other Peer instances 125 | Routing routing.IpfsRouting // the routing system. recommend ipfs-dht 126 | Priv crypto.PrivKey 127 | Pub crypto.PubKey 128 | 129 | PeerHost p2phost.Host // the network host (server+client) 130 | StreamPeers map[string][]ps.PeerInfo // the network host (server+client) 131 | HLSBuf *lpmsStream.HLSBuffer 132 | // HaveMsgBuffer map[string]peer.ID 133 | 134 | // StreamSwarms map[string]net.Network //Every stream has its own swarm 135 | } 136 | 137 | //NewNode creates a new Livepeerd node. 
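// It listens on 127.0.0.1:listenPort, stores the given keys in a fresh
// peerstore, wraps a basic host with DHT-backed routing, and allocates an
// HLS buffer for incoming segments.
//
// A minimal usage sketch (key generation as in cmd/kad; the port value is
// illustrative only):
//
//	priv, pub, _ := crypto.GenerateKeyPair(crypto.RSA, 2048)
//	n, err := NewNode(10000, priv, pub)
//	if err != nil {
//		glog.Fatalf("Cannot create node: %v", err)
//	}
//	glog.Infof("%v listening on %v", n.Identity.Pretty(), n.PeerHost.Addrs())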
138 | func NewNode(listenPort int, priv crypto.PrivKey, pub crypto.PubKey) (*LivepeerNode, error) { 139 | pid, err := peer.IDFromPublicKey(pub) 140 | if err != nil { 141 | return nil, err 142 | } 143 | 144 | // Create a multiaddress 145 | addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)) 146 | if err != nil { 147 | return nil, err 148 | } 149 | 150 | // Create a peerstore 151 | store := ps.NewPeerstore() 152 | store.AddPrivKey(pid, priv) 153 | store.AddPubKey(pid, pub) 154 | 155 | // Create swarm (implements libP2P Network) 156 | netwrk, err := swarm.NewNetwork( 157 | context.Background(), 158 | []ma.Multiaddr{addr}, 159 | pid, 160 | store, 161 | nil) 162 | 163 | basicHost := bhost.New(netwrk) 164 | 165 | r, err := constructDHTRouting(context.Background(), basicHost, ds.NewMapDatastore()) 166 | rHost := rhost.Wrap(basicHost, r) 167 | 168 | // return &LivepeerNode{Identity: pid, Peerstore: store, Routing: r, PeerHost: rHost, HaveMsgBuffer: make(map[string]peer.ID), StreamPeers: make(map[string][]ps.PeerInfo), Priv: priv, Pub: pub}, nil 169 | buf := lpmsStream.NewHLSBuffer(5, 1000) 170 | return &LivepeerNode{Identity: pid, Peerstore: store, Routing: r, PeerHost: rHost, HLSBuf: buf, StreamPeers: make(map[string][]ps.PeerInfo), Priv: priv, Pub: pub}, nil 171 | } 172 | 173 | //Takes a HLS stream and sends HAVE messages for each chunk to known peers 174 | func (n *LivepeerNode) SendHaveChunk(name, strmID string) { 175 | // pid, _ := peer.IDHexDecode(peerID) 176 | // strmSwarm := n.StreamSwarm(strmID) 177 | peers := n.StreamPeers[strmID] 178 | if peers == nil { 179 | peers = make([]ps.PeerInfo, 0, 0) 180 | n.StreamPeers[strmID] = peers 181 | } 182 | 183 | for _, peer := range peers { 184 | if peer.ID != n.Identity { 185 | glog.Infof("Sending Have Msg to %v for %v", peer.ID.Pretty(), name) 186 | n.PeerHost.Connect(context.Background(), peer) 187 | stream, err := n.PeerHost.NewStream(context.Background(), peer.ID, LivepeerProtocol) 188 | if err != nil { 189 | log.Fatal(err) 190 | } 191 | n.sendMessage(stream, peer.ID, ChunkHaveID, ChunkHaveMsg{ChunkName: name, StreamID: strmID, PeerID: n.Identity.Pretty(), Addr: n.PeerHost.Addrs()[0].String()}) 192 | } 193 | } 194 | } 195 | 196 | func (n *LivepeerNode) SendJoin(pid peer.ID) { 197 | stream, err := n.PeerHost.NewStream(context.Background(), pid, LivepeerProtocol) 198 | if err != nil { 199 | log.Fatal(err) 200 | } 201 | n.sendMessage(stream, pid, JoinMsgID, JoinMsg{ID: n.Identity.Pretty(), Addr: n.PeerHost.Addrs()[0].String()}) 202 | } 203 | 204 | func (n *LivepeerNode) SendJoinAck(pid peer.ID) { 205 | stream, err := n.PeerHost.NewStream(context.Background(), pid, LivepeerProtocol) 206 | if err != nil { 207 | log.Fatal(err) 208 | } 209 | n.sendMessage(stream, pid, JoinAckID, nil) 210 | } 211 | 212 | func (n *LivepeerNode) SendChunkReq(peer ps.PeerInfo, cn, strmID string) { 213 | if n.PeerHost.Network().Connectedness(peer.ID) != net.Connected { 214 | n.PeerHost.Connect(context.Background(), peer) 215 | } 216 | glog.Infof("Sending Chunk Req for %v to %v", cn, peer.ID.Pretty()) 217 | stream, err := n.PeerHost.NewStream(context.Background(), peer.ID, LivepeerProtocol) 218 | if err != nil { 219 | log.Fatal(err) 220 | } 221 | n.sendMessage(stream, peer.ID, ChunkReqID, ChunkReqMsg{ChunkName: cn, StreamID: strmID, Peer: n.Identity.Pretty()}) 222 | } 223 | 224 | func (n *LivepeerNode) SendChunkReqAck(peer ps.PeerInfo, cn, strmID string, data []byte) { 225 | if n.PeerHost.Network().Connectedness(peer.ID) != net.Connected { 226 | 
n.PeerHost.Connect(context.Background(), peer) 227 | } 228 | stream, err := n.PeerHost.NewStream(context.Background(), peer.ID, LivepeerProtocol) 229 | if err != nil { 230 | log.Fatal(err) 231 | } 232 | n.sendMessage(stream, peer.ID, ChunkReqAckID, ChunkReqAckMsg{ChunkName: cn, StreamID: strmID, Data: fmt.Sprintf("%x", data)}) 233 | } 234 | 235 | func (n *LivepeerNode) SendSubscribeReq(strmID StreamID) { 236 | pid, _ := strmID.SplitComponents() 237 | 238 | //Try and join the stream-specific swarm 239 | peerInfo, err := n.Routing.FindPeer(context.Background(), pid) 240 | if err != nil { 241 | log.Fatalf("Cannot subscribe to stream: %v", err) 242 | } 243 | 244 | // n.Peerstore.AddAddrs(pid, peerInfo.Addrs, ps.PermanentAddrTTL) 245 | // n.PeerHost.Connect(context.Background(), peerInfo) 246 | // n.SendJoin(pid) 247 | // swarm := n.StreamSwarm(strmID.String()) 248 | // swarm.Peerstore().AddAddrs(pid, peerInfo.Addrs, ps.PermanentAddrTTL) 249 | // stream, err := swarm.NewStream(context.Background(), pid) 250 | 251 | n.PeerHost.Connect(context.Background(), peerInfo) 252 | stream, err := n.PeerHost.NewStream(context.Background(), pid, LivepeerProtocol) 253 | if err != nil { 254 | log.Fatal(err) 255 | } 256 | n.sendMessage(stream, pid, SubReqID, SubReqMsg{ID: n.Identity.Pretty(), Addr: n.PeerHost.Addrs()[0].String(), SID: strmID.String()}) 257 | 258 | // swarm := n.StreamSwarm(strmID.String()) 259 | // swarm.Peerstore().AddAddrs(pid, peerInfo.Addrs, ps.PermanentAddrTTL) 260 | // stream, err := swarm.NewStream(context.Background(), pid) 261 | // if err != nil { 262 | // log.Fatal(err) 263 | // } 264 | // glog.Infof("Sending subscribe message to %v", pid) 265 | // n.sendMessage(stream, pid, SubscribeReqID, nil) 266 | } 267 | 268 | func (n *LivepeerNode) sendMessage(stream net.Stream, pid peer.ID, msgID int, data interface{}) { 269 | // stream, err := n.PeerHost.NewStream(context.Background(), pid, LivepeerProtocol) 270 | // if err != nil { 271 | // log.Fatal(err) 272 | // } 273 | 274 | wrappedStream := WrapStream(stream) 275 | msg := Message{Msg: msgID, Data: data} 276 | // glog.Infof("msg.Data: %v", msg.Data) 277 | err := wrappedStream.Enc.Encode(msg) 278 | if err != nil { 279 | glog.Errorf("send message encode error: %v", err) 280 | } 281 | 282 | err = wrappedStream.Writer.Flush() 283 | if err != nil { 284 | glog.Errorf("send message flush error: %v", err) 285 | } 286 | } 287 | 288 | func constructDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching) (routing.IpfsRouting, error) { 289 | dhtRouting := dht.NewDHT(ctx, host, dstore) 290 | // dhtRouting.Validator[IpnsValidatorTag] = namesys.IpnsRecordValidator 291 | // dhtRouting.Selector[IpnsValidatorTag] = namesys.IpnsSelectorFunc 292 | return dhtRouting, nil 293 | } 294 | 295 | // func (n *LivepeerNode) StreamSwarm(strmID string) net.Network { 296 | // // var strmSwarm net.Network 297 | // if n.StreamSwarms[strmID] == nil { 298 | // glog.Infof("Creating a new local swarm for %v", strmID) 299 | // ps := ps.NewPeerstore() 300 | // ps.AddPrivKey(n.Identity, n.Priv) 301 | // ps.AddPubKey(n.Identity, n.Pub) 302 | 303 | // strmSwarm, err := swarm.NewNetwork( 304 | // context.Background(), 305 | // []ma.Multiaddr{n.PeerHost.Addrs()[0]}, 306 | // n.Identity, 307 | // ps, 308 | // nil) 309 | // if err != nil { 310 | // glog.Fatalf("Cannot create Stream Swarm: %v", err) 311 | // } 312 | // n.StreamSwarms[strmID] = strmSwarm 313 | // return strmSwarm 314 | // } 315 | 316 | // return n.StreamSwarms[strmID] 317 | // } 318 | 319 | type StreamID 
string 320 | 321 | func RandomStreamID() string { 322 | rand.Seed(time.Now().UnixNano()) 323 | x := make([]byte, 16, 16) 324 | for i := 0; i < len(x); i++ { 325 | x[i] = byte(rand.Uint32()) 326 | } 327 | // strmID, _ := peer.IDFromBytes(x) 328 | return fmt.Sprintf("%x", x) 329 | } 330 | 331 | func MakeStreamID(nodeID peer.ID, id string) StreamID { 332 | return StreamID(fmt.Sprintf("%s%s", peer.IDHexEncode(nodeID), id)) 333 | } 334 | 335 | func (self *StreamID) String() string { 336 | return string(*self) 337 | } 338 | 339 | // Given a stream ID, return it's origin nodeID and the unique stream ID 340 | func (self *StreamID) SplitComponents() (peer.ID, string) { 341 | strStreamID := string(*self) 342 | originComponentLength := 68 // 32 bytes == 64 hexadecimal digits 343 | if len(strStreamID) != (68 + 32) { 344 | return "", "" 345 | } 346 | pid, _ := peer.IDHexDecode(strStreamID[:originComponentLength]) 347 | return pid, strStreamID[originComponentLength:] 348 | } 349 | -------------------------------------------------------------------------------- /cmd/livepeerd/livepeerd.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/hex" 6 | "encoding/json" 7 | "errors" 8 | "flag" 9 | "io/ioutil" 10 | "net/http" 11 | "net/url" 12 | "path" 13 | "regexp" 14 | "strconv" 15 | "strings" 16 | "time" 17 | 18 | "github.com/ericxtang/livepeer-libp2p-spike/core" 19 | "github.com/golang/glog" 20 | crypto "github.com/libp2p/go-libp2p-crypto" 21 | net "github.com/libp2p/go-libp2p-net" 22 | peer "github.com/libp2p/go-libp2p-peer" 23 | ps "github.com/libp2p/go-libp2p-peerstore" 24 | protocol "github.com/libp2p/go-libp2p-protocol" 25 | "github.com/livepeer/lpms" 26 | ma "github.com/multiformats/go-multiaddr" 27 | "github.com/nareix/joy4/av" 28 | 29 | lpmsStream "github.com/livepeer/lpms/stream" 30 | ) 31 | 32 | var LivepeerProtocol = protocol.ID("/livepeer") 33 | 34 | // var SeedPeerID = "QmURyyVgQBd59rtLfNrdryZ6FAhYZvCUJTpePmXmbE4ghR" 35 | // var SeedAddr = "/ip4/127.0.0.1/tcp/10000" 36 | 37 | type KeyFile struct { 38 | Pub string 39 | Priv string 40 | } 41 | 42 | func getKeys(datadir string) (crypto.PrivKey, crypto.PubKey) { 43 | f, e := ioutil.ReadFile(path.Join(datadir, "keys.json")) 44 | var keyf KeyFile 45 | json.Unmarshal(f, &keyf) 46 | 47 | if e != nil || keyf.Priv == "" || keyf.Pub == "" { 48 | glog.Errorf("Cannot file keys in data dir, creating new keys") 49 | priv, pub, _ := crypto.GenerateKeyPair(crypto.RSA, 2048) 50 | 51 | privb, _ := priv.Bytes() 52 | pubb, _ := pub.Bytes() 53 | 54 | kf := KeyFile{Priv: crypto.ConfigEncodeKey(privb), Pub: crypto.ConfigEncodeKey(pubb)} 55 | kfb, _ := json.Marshal(kf) 56 | ioutil.WriteFile(path.Join(datadir, "keys.json"), kfb, 0644) 57 | 58 | return priv, pub 59 | } 60 | 61 | privb, _ := crypto.ConfigDecodeKey(keyf.Priv) 62 | pubb, _ := crypto.ConfigDecodeKey(keyf.Pub) 63 | priv, _ := crypto.UnmarshalPrivateKey(privb) 64 | pub, _ := crypto.UnmarshalPublicKey(pubb) 65 | return priv, pub 66 | } 67 | 68 | func handleProtocol(vs *VideoServer, n *core.LivepeerNode, ws *core.WrappedStream) { 69 | var msg core.Message 70 | err := ws.Dec.Decode(&msg) 71 | 72 | if err != nil { 73 | glog.Errorf("Got error decoding msg: %v", err) 74 | return 75 | } 76 | //Protocol: 77 | // - Join Swarm (and form kademlia network) 78 | // - Get Peer Info??? 
79 | // - Have(strmID, chunkID) 80 | // - Request Video Chunk(strmID, chunkID) (ChunkDATA) 81 | // - Transcode(strmID, config) (newStrmID) 82 | switch msg.Msg { 83 | case core.JoinMsgID: 84 | handleJoinSwarm(n, msg) 85 | 86 | case core.JoinAckID: 87 | glog.Infof("Recieved Ack") 88 | 89 | case core.ChunkHaveID: 90 | handleHaveMsg(n, msg) 91 | 92 | case core.ChunkReqID: 93 | handleChunkReqMsg(vs.ChunkSub, n, msg) 94 | 95 | case core.ChunkReqAckID: 96 | handleChunkReqAckMsg(vs.ChunkSub.Buffer, n, msg) 97 | 98 | case core.SubReqID: 99 | handleSubscribeMsg(n, msg) 100 | default: 101 | } 102 | 103 | } 104 | 105 | func handleJoinSwarm(n *core.LivepeerNode, msg core.Message) { 106 | //Join the swarm 107 | joinMsg, ok := msg.Data.(map[string]interface{}) 108 | if !ok { 109 | glog.Errorf("Error converting JoinMsg: %v", msg.Data) 110 | } 111 | pid, _ := peer.IDB58Decode(joinMsg["ID"].(string)) 112 | addr, _ := ma.NewMultiaddr(joinMsg["Addr"].(string)) 113 | n.Peerstore.AddAddr(pid, addr, ps.PermanentAddrTTL) 114 | n.PeerHost.Connect(context.Background(), ps.PeerInfo{ID: pid}) 115 | 116 | //Print current peers 117 | glog.Infof("Peers: %v", n.Peerstore.Peers()) 118 | for _, p := range n.Peerstore.Peers() { 119 | glog.Infof("%v", p.Pretty()) 120 | } 121 | glog.Infof("\n\n") 122 | 123 | //Send Ack 124 | n.SendJoinAck(pid) 125 | } 126 | 127 | func handleHaveMsg(n *core.LivepeerNode, msg core.Message) { 128 | haveMsg, ok := msg.Data.(map[string]interface{}) 129 | glog.Infof("Got have Msg: %v", haveMsg) 130 | if !ok { 131 | glog.Errorf("Error converting JoinMsg: %v", msg.Data) 132 | } 133 | cName, _ := haveMsg["ChunkName"].(string) 134 | pid, _ := peer.IDB58Decode(haveMsg["PeerID"].(string)) 135 | addr, _ := ma.NewMultiaddr(haveMsg["Addr"].(string)) 136 | 137 | //Send ChunkReq - greedy approach 138 | n.SendChunkReq(ps.PeerInfo{ID: pid, Addrs: []ma.Multiaddr{addr}}, cName, "") 139 | } 140 | 141 | func handleSubscribeMsg(n *core.LivepeerNode, msg core.Message) { 142 | subMsg, ok := msg.Data.(map[string]interface{}) 143 | glog.Infof("Got subscribe Msg: %v", subMsg) 144 | if !ok { 145 | glog.Errorf("Error converting subMsg: %v", msg.Data) 146 | } 147 | 148 | //Add the peer to the stream swarm, so they'll start recieving HAVE messages 149 | pid, _ := peer.IDB58Decode(subMsg["ID"].(string)) 150 | addr, _ := ma.NewMultiaddr(subMsg["Addr"].(string)) 151 | sid := subMsg["SID"].(string) 152 | 153 | peers := n.StreamPeers[sid] 154 | if peers == nil { 155 | peers = make([]ps.PeerInfo, 0, 0) 156 | n.StreamPeers[sid] = peers 157 | } 158 | peers = append(peers, ps.PeerInfo{ID: pid, Addrs: []ma.Multiaddr{addr}}) 159 | n.StreamPeers[sid] = peers 160 | 161 | // swarm.Peerstore().AddAddr(pid, addr, ps.PermanentAddrTTL) 162 | glog.Infof("Peers for %v: %v", sid, n.StreamPeers[sid]) 163 | } 164 | 165 | func handleChunkReqMsg(cs *core.ChunkSubscriber, n *core.LivepeerNode, msg core.Message) { 166 | chunkReqMsg, ok := msg.Data.(map[string]interface{}) 167 | glog.Infof("Got chunkReqMsg: %v", chunkReqMsg) 168 | if !ok { 169 | glog.Errorf("Error converting subMsg: %v", msg.Data) 170 | } 171 | 172 | //Send the data back 173 | cname, _ := chunkReqMsg["ChunkName"].(string) 174 | pid, _ := peer.IDB58Decode(chunkReqMsg["Peer"].(string)) 175 | strmID, _ := chunkReqMsg["StreamID"].(string) 176 | seg, err := cs.Buffer.WaitAndGetSegment(context.Background(), cname) 177 | if err != nil { 178 | glog.Errorf("Chunk Req Handler Get Segment Failed: %v", err) 179 | } 180 | 181 | glog.Infof("Sending chunk req ack (%v) to %v", len(seg), pid.Pretty()) 
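//SendChunkReqAck hex-encodes the segment bytes (see core.SendChunkReqAck);
//handleChunkReqAckMsg on the requesting side hex-decodes them before writing
//them into its HLS buffer.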
182 | n.SendChunkReqAck(ps.PeerInfo{ID: pid, Addrs: []ma.Multiaddr{n.PeerHost.Addrs()[0]}}, cname, strmID, seg) 183 | } 184 | 185 | func handleChunkReqAckMsg(buf *lpmsStream.HLSBuffer, n *core.LivepeerNode, msg core.Message) { 186 | chunkReqAckMsg, ok := msg.Data.(map[string]interface{}) 187 | // glog.Infof("Got chunkReqAck Msg: %v", chunkReqAckMsg) 188 | if !ok { 189 | glog.Errorf("Error converting chunkReqAckMsg: %v", msg.Data) 190 | } 191 | 192 | // var msgStruct core.ChunkReqAckMsg 193 | // enc := gob.NewEncoder(&msgStruct) 194 | // err = enc.Encode(chunkReqAckMsg["Data"]) 195 | 196 | //Send the data back 197 | cname, ok := chunkReqAckMsg["ChunkName"].(string) 198 | if !ok { 199 | glog.Errorf("Chunk Req Ack Handler Failed: %v", chunkReqAckMsg["ChunkName"]) 200 | } 201 | data, ok := chunkReqAckMsg["Data"].(string) 202 | if !ok { 203 | glog.Errorf("Chunk Req Ack Handler Failed: %v", chunkReqAckMsg["Data"]) 204 | } 205 | numStr := strings.Split(strings.Split(cname, "_")[1], ".")[0] 206 | num, err := strconv.Atoi(numStr) 207 | if err != nil { 208 | glog.Errorf("Chunk Req Ack Handler Failed: %v", err) 209 | } 210 | 211 | dataBytes, err := hex.DecodeString(data) 212 | if err != nil { 213 | glog.Errorf("Chunk Req Ack Handler Failed: %v", err) 214 | } 215 | 216 | err = buf.WriteSegment(uint64(num), cname, 8, dataBytes) 217 | if err != nil { 218 | glog.Errorf("Chunk Req Ack Handler Failed: %v", err) 219 | } 220 | // glog.Infof("Got Chunk: %v(%v)", cname, len(data)) 221 | glog.Infof("Got Chunk #%v: %v(%v)", num, cname, len(dataBytes)) 222 | } 223 | 224 | // func getPeers(s net.Stream) { 225 | // buf := bufio.NewReader(s) 226 | // } 227 | 228 | //Other Functionalities 229 | // - Write Video Chunk to Swarm, IPFS, S3 230 | 231 | func main() { 232 | // glog.Infof("Starting node...") 233 | datadir := flag.String("datadir", "", "data directory") 234 | port := flag.Int("p", 0, "port") 235 | httpPort := flag.String("http", "", "http port") 236 | rtmpPort := flag.String("rtmp", "", "rtmp port") 237 | seedID := flag.String("seedID", "", "Node ID for the seed node") 238 | seedAddr := flag.String("seedAddr", "", "Addr for the seed node") 239 | flag.Parse() 240 | 241 | if *datadir == "" { 242 | glog.Fatalf("Please provide a datadir with -datadir") 243 | } 244 | if *port == 0 { 245 | glog.Fatalf("Please provide a port with -p") 246 | } 247 | if *httpPort == "" { 248 | glog.Fatalf("Please provide an http port with -http") 249 | } 250 | if *rtmpPort == "" { 251 | glog.Fatalf("Please provide an rtmp port with -rtmp") 252 | } 253 | 254 | priv, pub := getKeys(*datadir) 255 | 256 | n, err := core.NewNode(*port, priv, pub) 257 | if err != nil { 258 | glog.Errorf("Cannot create node: %v", err) 259 | return 260 | } 261 | 262 | vs := NewVideoServer(n, *rtmpPort, *httpPort, "", "") 263 | 264 | n.PeerHost.SetStreamHandler(LivepeerProtocol, func(s net.Stream) { 265 | wrappedStream := core.WrapStream(s) 266 | defer s.Close() 267 | handleProtocol(vs, n, wrappedStream) 268 | }) 269 | 270 | // glog.Infof("Node ID: %v", n.PeerHost.ID()) 271 | // glog.Infof("Identity: %s", n.Identity.Pretty()) 272 | // glog.Infof("peer.ID: %s", peer.ID("0x1442ef0")) 273 | // glog.Infof("Addrs: %v", n.Peerstore.Addrs()) 274 | var sid peer.ID 275 | if *seedID == "" { 276 | sid = n.Identity 277 | } else { 278 | sid, err = peer.IDB58Decode(*seedID) 279 | if err != nil { 280 | glog.Fatalf("Wrong seedID: %v", err) 281 | } 282 | } 283 | 284 | //Try and connect to the seed node 285 | if n.Identity != sid { 286 | var saddr ma.Multiaddr 287 | if *seedAddr == "" { 288 | glog.Fatalf("Need to specify 
seedAddr") 289 | } 290 | saddr, err := ma.NewMultiaddr(*seedAddr) 291 | if err != nil { 292 | glog.Fatalf("Cannot join swarm: %v", err) 293 | } 294 | n.Peerstore.AddAddr(sid, saddr, ps.PermanentAddrTTL) 295 | n.PeerHost.Connect(context.Background(), ps.PeerInfo{ID: sid}) 296 | 297 | n.SendJoin(sid) 298 | } 299 | 300 | //TODO: Kick off a goroutine to monitor connection speed 301 | 302 | glog.Infof("%v listening for connections on %v", n.Identity.Pretty(), n.PeerHost.Addrs()) 303 | go vs.StartVideoServer(context.Background()) 304 | select {} // hang forever 305 | } 306 | 307 | type StreamDB struct { 308 | db map[string]lpmsStream.Stream 309 | } 310 | 311 | type BufferDB struct { 312 | db map[string]*lpmsStream.HLSBuffer 313 | } 314 | 315 | type VideoServer struct { 316 | SDB *StreamDB 317 | BDB *BufferDB 318 | Server *lpms.LPMS 319 | Node *core.LivepeerNode 320 | ChunkSub *core.ChunkSubscriber 321 | } 322 | 323 | func NewVideoServer(n *core.LivepeerNode, rtmpPort string, httpPort string, ffmpegPath string, vodPath string) *VideoServer { 324 | s := lpms.New(rtmpPort, httpPort, ffmpegPath, vodPath) 325 | sdb := &StreamDB{db: make(map[string]lpmsStream.Stream)} 326 | bdb := &BufferDB{db: make(map[string]*lpmsStream.HLSBuffer)} 327 | return &VideoServer{SDB: sdb, BDB: bdb, Server: s, Node: n} 328 | } 329 | 330 | func (vs *VideoServer) StartVideoServer(ctx context.Context) { 331 | //Create HLSBuffer, subscribe to the HLS stream 332 | glog.Infof("Creating new HLS Buffer") 333 | buffer := lpmsStream.NewHLSBuffer(10, 1000) 334 | cs := core.NewChunkSubscriber(buffer, vs.Node) 335 | vs.ChunkSub = cs 336 | 337 | vs.Server.HandleRTMPPublish( 338 | //getStreamID 339 | func(url *url.URL) (string, error) { 340 | return "", nil 341 | }, 342 | //getStream 343 | func(url *url.URL) (lpmsStream.Stream, lpmsStream.Stream, error) { 344 | 345 | rtmpStrmID := core.StreamID(parseStreamID(url.Path)) 346 | if rtmpStrmID == "" { 347 | rtmpStrmID = core.MakeStreamID(vs.Node.Identity, core.RandomStreamID()) 348 | } 349 | nodeID, _ := rtmpStrmID.SplitComponents() 350 | if nodeID != vs.Node.Identity { 351 | glog.Errorf("Invalid rtmp strmID - nodeID component needs to be self.") 352 | return nil, nil, errors.New("Invalid RTMP StrmID") 353 | } 354 | 355 | hlsStrmID := core.MakeStreamID(vs.Node.Identity, core.RandomStreamID()) 356 | rtmpStream := lpmsStream.NewVideoStream(string(rtmpStrmID), lpmsStream.RTMP) 357 | hlsStream := lpmsStream.NewVideoStream(string(hlsStrmID), lpmsStream.HLS) 358 | vs.SDB.db[string(rtmpStrmID)] = rtmpStream 359 | vs.SDB.db[string(hlsStrmID)] = hlsStream 360 | 361 | vs.BDB.db[string(hlsStrmID)] = buffer 362 | sub := lpmsStream.NewStreamSubscriber(hlsStream) 363 | go sub.StartHLSWorker(context.Background(), time.Second*10) 364 | err := sub.SubscribeHLS(string(hlsStrmID), cs) 365 | if err != nil { 366 | return nil, nil, errors.New("ErrStreamSubscriber") 367 | } 368 | 369 | glog.Infof("Streaming hls stream: %v", hlsStrmID) 370 | return rtmpStream, hlsStream, nil 371 | }, 372 | //finishStream 373 | func(rtmpStrmID string, hlsStrmID string) { 374 | glog.Infof("Finish Stream - canceling stream (need to implement handler for Done())") 375 | // streamer.DeleteNetworkStream(streaming.StreamID(rtmpStrmID)) 376 | // streamer.DeleteNetworkStream(streaming.StreamID(hlsStrmID)) 377 | // streamer.UnsubscribeAll(rtmpStrmID) 378 | // streamer.UnsubscribeAll(hlsStrmID) 379 | }) 380 | vs.Server.HandleRTMPPlay( 381 | //getStream 382 | func(ctx context.Context, reqPath string, dst av.MuxCloser) error { 383 | 
glog.Infof("Got req: ", reqPath) 384 | streamID := parseStreamID(reqPath) 385 | src := vs.SDB.db[streamID] 386 | 387 | if src != nil { 388 | src.ReadRTMPFromStream(ctx, dst) 389 | } else { 390 | glog.Error("Cannot find stream for ", streamID) 391 | return errors.New("ErrNotFound") 392 | } 393 | return nil 394 | }) 395 | 396 | vs.Server.HandleHLSPlay( 397 | //getHLSBuffer 398 | func(reqPath string) (*lpmsStream.HLSBuffer, error) { 399 | streamID := parseStreamID(reqPath) 400 | glog.Infof("Got HTTP Req for stream: %v", streamID) 401 | buffer := vs.BDB.db[streamID] 402 | s := vs.SDB.db[streamID] 403 | 404 | if s == nil { 405 | //Send request to the network 406 | pl, _ := vs.ChunkSub.Buffer.LatestPlaylist() 407 | if pl.Segments[0] != nil { 408 | return vs.ChunkSub.Buffer, nil 409 | } 410 | 411 | return nil, errors.New("NotFound") 412 | } 413 | 414 | if buffer == nil { 415 | return nil, errors.New("NotFound") 416 | } 417 | return buffer, nil 418 | }) 419 | 420 | http.HandleFunc("/subscribe", func(w http.ResponseWriter, r *http.Request) { 421 | vals, _ := url.ParseQuery(r.URL.RawQuery) 422 | strmID := core.StreamID(vals.Get("streamID")) 423 | glog.Infof("Got http request for /subscribe %v", strmID) 424 | 425 | vs.Node.SendSubscribeReq(strmID) 426 | w.Write([]byte("ok")) 427 | }) 428 | 429 | vs.Server.Start() 430 | } 431 | 432 | func parseStreamID(reqPath string) string { 433 | var strmID string 434 | regex, _ := regexp.Compile("\\/stream\\/([[:alpha:]]|\\d)*") 435 | match := regex.FindString(reqPath) 436 | if match != "" { 437 | strmID = strings.Replace(match, "/stream/", "", -1) 438 | } else { 439 | regex, _ = regexp.Compile("\\?streamID=([[:alpha:]]|\\d)*") 440 | match = regex.FindString(reqPath) 441 | if match != "" { 442 | strmID = strings.Replace(match, "/streamID=", "", -1) 443 | } 444 | } 445 | return strmID 446 | } 447 | --------------------------------------------------------------------------------