├── .gitattributes ├── .gitignore ├── .gitreview ├── .travis.yml ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── README.md ├── agent ├── agent.go ├── probes.go └── topology_forwarder.go ├── alert └── server.go ├── analyzer ├── flow_client.go ├── flow_conn.go ├── probes.go ├── server.go ├── topology_forwarder.go └── topology_server.go ├── api ├── alert.go ├── api.go ├── capture.go ├── config.go ├── flow.go ├── flow_test.go ├── handler.go ├── packet_injector.go ├── pcap.go └── topology.go ├── cmd ├── agent.go ├── allinone.go ├── analyzer.go ├── client.go ├── client │ ├── alert.go │ ├── capture.go │ ├── common.go │ ├── gremlin.go │ ├── liner.go │ ├── packet_injector.go │ ├── pcap.go │ ├── shell.go │ ├── terminal_unix.go │ └── topology.go ├── cmd.go └── version.go ├── common └── common.go ├── config └── config.go ├── contrib ├── docker │ ├── Dockerfile │ ├── docker-compose.yml │ ├── skydive.yml │ └── start-skydive.sh ├── kubernetes │ └── skydive.yaml ├── openshift │ ├── README.md │ └── skydive-template.yaml ├── packaging │ └── rpm │ │ ├── generate-skydive-bootstrap.sh │ │ ├── skydive-agent.sysconfig │ │ ├── skydive-analyzer.sysconfig │ │ ├── skydive.spec │ │ └── specfile-update-bundles ├── systemd │ ├── skydive-agent.service │ └── skydive-analyzer.service └── vagrant │ ├── Vagrantfile │ ├── setup-agent.sh │ ├── setup-analyzer.sh │ ├── setup-common.sh │ ├── start-agent.sh │ └── start-analyzer.sh ├── coverage.sh ├── devstack ├── local.conf.cpu ├── local.conf.full ├── override-defaults ├── plugin.sh └── settings ├── doc ├── config.toml ├── content │ ├── api │ │ ├── alerts.md │ │ ├── captures.md │ │ ├── flows.md │ │ ├── gremlin.md │ │ ├── index.md │ │ └── rest.md │ ├── architecture.md │ ├── contact.md │ ├── contributing.md │ ├── deployment.md │ ├── getting-started │ │ ├── client.md │ │ ├── docker.md │ │ ├── index.md │ │ ├── install.md │ │ ├── kubernetes.md │ │ ├── openstack.md │ │ └── vagrant.md │ ├── index.md │ ├── license.md │ └── use-cases │ │ ├── index.md │ │ └── 
packet-injector.md ├── static │ └── images │ │ ├── architecture.png │ │ ├── devstack-two-nodes.png │ │ ├── kubernetes-two-nodes.png │ │ ├── skydive-logo-16x16.png │ │ ├── skydive-logo-32x32.png │ │ ├── skydive-logo-96x96.png │ │ ├── skydive-logo.png │ │ ├── skydive-logo.svg │ │ └── skydive-screenshot.png └── themes │ └── hugo-material-docs │ ├── CHANGELOG.md │ ├── LICENSE.md │ ├── README.md │ ├── archetypes │ └── default.md │ ├── exampleSite │ ├── config.toml │ ├── content │ │ ├── adding-content │ │ │ └── index.md │ │ ├── getting-started │ │ │ └── index.md │ │ ├── index.md │ │ ├── license │ │ │ └── index.md │ │ └── roadmap │ │ │ └── index.md │ └── static │ │ └── .gitkeep │ ├── images │ ├── screenshot.png │ └── tn.png │ ├── layouts │ ├── 404.html │ ├── _default │ │ ├── list.html │ │ └── single.html │ ├── index.html │ ├── partials │ │ ├── drawer.html │ │ ├── footer.html │ │ ├── footer_js.html │ │ ├── head.html │ │ ├── header.html │ │ ├── nav.html │ │ └── nav_link.html │ └── shortcodes │ │ ├── note.html │ │ └── warning.html │ ├── static │ ├── fonts │ │ ├── icon.eot │ │ ├── icon.svg │ │ ├── icon.ttf │ │ └── icon.woff │ ├── images │ │ ├── colors.png │ │ ├── favicon.ico │ │ ├── logo.png │ │ └── screen.png │ ├── javascripts │ │ ├── application.js │ │ └── modernizr.js │ └── stylesheets │ │ ├── application.css │ │ ├── highlight │ │ └── highlight.css │ │ └── palettes.css │ └── theme.toml ├── etc └── skydive.yml.default ├── etcd ├── client.go ├── election.go └── server.go ├── filters ├── filters.go └── filters.proto ├── flow ├── allocator.go ├── client.go ├── decoder.go ├── filters.go ├── flow.go ├── flow.proto ├── flow_test.go ├── hash.go ├── mappings │ ├── graph.go │ ├── neutron.go │ └── pipeline.go ├── ondemand │ ├── client │ │ └── client.go │ ├── ondemand.go │ └── server │ │ └── server.go ├── packet │ └── packet.go ├── pcap.go ├── pcaptraces │ ├── contrail-udp-mpls-eth-and-ipv4.pcap │ ├── eth-ip4-arp-dns-req-http-google.pcap │ └── ping-with-without-ethernet.pcap ├── 
probes │ ├── afpacket.go │ ├── afpacket │ │ ├── afpacket.go │ │ ├── header.go │ │ └── options.go │ ├── gopacket.go │ ├── ovssflow.go │ ├── pcapsocket.go │ └── probes.go ├── request.proto ├── server.go ├── set.go ├── set.proto ├── set_test.go ├── storage │ ├── elasticsearch │ │ └── elasticsearch.go │ ├── orientdb │ │ └── orientdb.go │ └── storage.go ├── table.go ├── table_test.go ├── testhelper.go └── traversal │ ├── traversal.go │ └── traversal_test.go ├── http ├── auth.go ├── basic.go ├── client.go ├── keystone.go ├── noauth.go ├── server.go ├── wsclient.go └── wsserver.go ├── logging └── logging.go ├── ovs ├── ovsdb.go └── ovsdb_test.go ├── packet_injector ├── client.go ├── inject_packet.go └── server.go ├── probe └── bundle.go ├── scripts ├── ci │ ├── create-release.sh │ ├── extract-changelog.py │ ├── install-elasticsearch.sh │ ├── install-go.sh │ ├── install-orientdb.sh │ ├── install-requirements.sh │ ├── install-static-requirements.sh │ ├── run-devstack.sh │ ├── run-functional-tests.sh │ ├── run-go-fmt.sh │ └── run-unit-tests.sh ├── multinode.sh ├── scale.sh ├── simple.sh ├── topology.sh └── tunnel.sh ├── sflow └── agent.go ├── skydive.go ├── statics ├── css │ ├── bootstrap-slider.min.css │ ├── bootstrap-switch.min.css │ ├── bootstrap.3.3.5.min.css │ ├── font-awesome.4.7.0.min.css │ ├── images │ │ ├── ui-bg_flat_0_aaaaaa_40x100.png │ │ ├── ui-bg_flat_75_ffffff_40x100.png │ │ ├── ui-bg_glass_55_fbf9ee_1x400.png │ │ ├── ui-bg_glass_65_ffffff_1x400.png │ │ ├── ui-bg_glass_75_dadada_1x400.png │ │ ├── ui-bg_glass_75_e6e6e6_1x400.png │ │ ├── ui-bg_glass_95_fef1ec_1x400.png │ │ ├── ui-bg_highlight-soft_75_cccccc_1x100.png │ │ ├── ui-icons_222222_256x240.png │ │ ├── ui-icons_2e83ff_256x240.png │ │ ├── ui-icons_454545_256x240.png │ │ ├── ui-icons_888888_256x240.png │ │ └── ui-icons_cd0a0a_256x240.png │ ├── jquery-ui.1.10.1.css │ ├── jquery.jsonview.css │ ├── skydive.css │ └── timeslider.css ├── fonts │ ├── fontawesome-webfont.woff2 │ └── 
glyphicons-halflings-regular.woff2 ├── img │ ├── bridge.png │ ├── collapse.gif │ ├── docker.png │ ├── expand.gif │ ├── host.png │ ├── intf.png │ ├── media-record.png │ ├── minus-outline-16.png │ ├── ns.png │ ├── openstack.png │ ├── pin.png │ ├── plus-16.png │ ├── port.png │ ├── record.png │ ├── record_red.png │ ├── refresh.png │ ├── skydive-logo-16x16.png │ ├── switch.png │ ├── trash.png │ └── veth.png ├── js │ ├── bootstrap-notify.min.js │ ├── bootstrap-slider.min.js │ ├── bootstrap-switch.min.js │ ├── capture.js │ ├── components │ │ ├── autocomplete.js │ │ ├── buttons.js │ │ ├── capture-form.js │ │ ├── capture-list.js │ │ ├── flow-table.js │ │ ├── inject-form.js │ │ ├── node-selector.js │ │ ├── object-detail.js │ │ └── tabs.js │ ├── conversation.js │ ├── d3.3.5.10.js │ ├── dev │ │ └── vue-2.1.9.js │ ├── discovery.js │ ├── jquery-ui.1.10.1.js │ ├── jquery.1.9.1.min.js │ ├── jquery.event.drag-2.3.0.js │ ├── jquery.jsonview.js │ ├── lscache.min.js │ ├── skydive.js │ ├── timeslider.js │ ├── topology.js │ ├── utils.js │ ├── vue-sidebar.js │ ├── vue.min-2.1.9.js │ └── websocket.js ├── login.html └── topology.html ├── storage ├── elasticsearch │ └── client.go └── orientdb │ └── client.go ├── tests ├── alert_test.go ├── api_test.go ├── flow_test.go ├── helper │ └── helper.go ├── neutron_test.go ├── packet_injector_test.go ├── pcaptraces │ └── eth-ip4-arp-dns-req-http-google.pcap ├── storage_test.go ├── tests.go └── topology_test.go ├── topology ├── graph │ ├── cachedbackend.go │ ├── elasticsearch.go │ ├── filters.go │ ├── graph.go │ ├── graph_test.go │ ├── memory.go │ ├── memory_test.go │ ├── message.go │ ├── orientdb.go │ ├── server.go │ └── traversal │ │ ├── traversal.go │ │ ├── traversal_extension.go │ │ ├── traversal_parser.go │ │ ├── traversal_scanner.go │ │ └── traversal_test.go ├── probes │ ├── docker.go │ ├── fabric.go │ ├── netlink.go │ ├── netns.go │ ├── neutron.go │ ├── opencontrail.go │ ├── ovsdb.go │ └── peering.go ├── tid.go ├── topology.go ├── 
topology_test.go ├── topology_traversal.go └── topology_traversal_test.go ├── validator └── validator.go ├── vendor └── vendor.json └── version └── version.go /.gitattributes: -------------------------------------------------------------------------------- 1 | jquery*.js linguist-vendored=true 2 | bootstrap*.js linguist-vendored=true 3 | d3*.js linguist-vendored=true 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | vendor/github.com/ 2 | vendor/golang.org/ 3 | vendor/google.golang.org/ 4 | vendor/gopkg.in/ 5 | contrib/packaging/rpm/*.tar.gz 6 | flow/*.pb.go 7 | statics/bindata.go 8 | 9 | .idea 10 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=softwarefactory-project.io 3 | port=29418 4 | project=skydive 5 | defaultbranch=master 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | go_import_path: github.com/skydive-project/skydive 3 | 4 | go: 5 | - 1.6 6 | 7 | sudo: required 8 | dist: trusty 9 | 10 | before_install: 11 | - sudo apt-get -qq update 12 | - sudo apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y openvswitch-switch unzip docker.io build-essential flex bison libxml2-dev libz-dev liblzma-dev libicu-dev libc++-dev bridge-utils libdb5.1-dev 13 | - sudo ovs-vsctl show 14 | - go get github.com/mattn/goveralls 15 | - go get golang.org/x/tools/cmd/cover 16 | 17 | - git clone https://github.com/the-tcpdump-group/libpcap.git 18 | - cd libpcap 19 | - git checkout libpcap-1.5.3 20 | - ./configure --prefix=/usr/local --disable-shared --disable-dbus --disable-bluetooth --disable-canusb 21 | - make 22 
| - sudo make install 23 | - cd .. 24 | 25 | - wget https://www.kernel.org/pub/linux/utils/net/iproute2/iproute2-4.0.0.tar.gz 26 | - tar -xvzf iproute2-4.0.0.tar.gz 27 | - cd iproute2-4.0.0 28 | - make 29 | - sudo make install 30 | - cd .. 31 | 32 | - mkdir ${HOME}/protoc 33 | - pushd ${HOME}/protoc 34 | - wget https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip 35 | - unzip protoc-3.1.0-linux-x86_64.zip 36 | - popd 37 | - export PATH=${HOME}/protoc/bin:${PATH} 38 | 39 | script: 40 | - export BUILD_TAG=$(date +%Y-%m-%d).${TRAVIS_JOB_NUMBER} 41 | 42 | - make install 43 | - make static 44 | - ./coverage.sh --coveralls 45 | 46 | - echo "--- DOCKER IMAGE ---" 47 | - make docker-image DOCKER_IMAGE=${DOCKER_IMAGE} DOCKER_TAG=${BUILD_TAG} 48 | - sudo -E docker login -e "${DOCKER_EMAIL}" -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" 49 | - sudo -E docker tag ${DOCKER_IMAGE}:${BUILD_TAG} ${DOCKER_IMAGE}:latest 50 | - sudo -E docker push docker.io/${DOCKER_IMAGE}:${BUILD_TAG} 51 | - sudo -E docker push docker.io/${DOCKER_IMAGE}:latest 52 | -------------------------------------------------------------------------------- /agent/probes.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package agent 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/config" 27 | shttp "github.com/skydive-project/skydive/http" 28 | "github.com/skydive-project/skydive/logging" 29 | "github.com/skydive-project/skydive/probe" 30 | "github.com/skydive-project/skydive/topology/graph" 31 | tprobes "github.com/skydive-project/skydive/topology/probes" 32 | ) 33 | 34 | func NewTopologyProbeBundleFromConfig(g *graph.Graph, n *graph.Node, wspool *shttp.WSAsyncClientPool) (*probe.ProbeBundle, error) { 35 | list := config.GetConfig().GetStringSlice("agent.topology.probes") 36 | logging.GetLogger().Infof("Topology probes: %v", list) 37 | 38 | probes := make(map[string]probe.Probe) 39 | bundle := probe.NewProbeBundle(probes) 40 | 41 | for _, t := range list { 42 | if _, ok := probes[t]; ok { 43 | continue 44 | } 45 | 46 | switch t { 47 | case "netlink": 48 | probes[t] = tprobes.NewNetLinkProbe(g, n) 49 | case "netns": 50 | nsProbe, err := tprobes.NewNetNSProbeFromConfig(g, n) 51 | if err != nil { 52 | return nil, err 53 | } 54 | probes[t] = nsProbe 55 | case "ovsdb": 56 | probes[t] = tprobes.NewOvsdbProbeFromConfig(g, n) 57 | case "docker": 58 | dockerProbe, err := tprobes.NewDockerProbeFromConfig(g, n) 59 | if err != nil { 60 | return nil, err 61 | } 62 | probes[t] = dockerProbe 63 | case "neutron": 64 | neutron, err := tprobes.NewNeutronMapperFromConfig(g, wspool) 65 | if err != nil { 66 | logging.GetLogger().Errorf("Failed to initialize Neutron probe: %s", err.Error()) 67 | return nil, err 68 | } 69 
| probes["neutron"] = neutron 70 | case "opencontrail": 71 | probes[t] = tprobes.NewOpenContrailMapper(g, n) 72 | default: 73 | logging.GetLogger().Errorf("unknown probe type %s", t) 74 | } 75 | } 76 | 77 | return bundle, nil 78 | } 79 | -------------------------------------------------------------------------------- /analyzer/probes.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package analyzer 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/probe" 27 | "github.com/skydive-project/skydive/topology/graph" 28 | tprobes "github.com/skydive-project/skydive/topology/probes" 29 | ) 30 | 31 | func NewTopologyProbeBundleFromConfig(g *graph.Graph) (*probe.ProbeBundle, error) { 32 | probes := make(map[string]probe.Probe) 33 | probes["fabric"] = tprobes.NewFabricProbe(g) 34 | probes["peering"] = tprobes.NewPeeringProbe(g) 35 | return probe.NewProbeBundle(probes), nil 36 | } 37 | -------------------------------------------------------------------------------- /api/alert.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package api 24 | 25 | import ( 26 | "time" 27 | 28 | "github.com/nu7hatch/gouuid" 29 | ) 30 | 31 | type Alert struct { 32 | UUID string 33 | Name string `json:",omitempty"` 34 | Description string `json:",omitempty"` 35 | Expression string `json:",omitempty" valid:"nonzero"` 36 | Action string `json:",omitempty" valid:"regexp=^(|http://|https://|file://).*$"` 37 | Trigger string `json:",omitempty" valid:"regexp=^(graph|duration:.+|)$"` 38 | CreateTime time.Time 39 | } 40 | 41 | type AlertResourceHandler struct { 42 | } 43 | 44 | type AlertApiHandler struct { 45 | BasicApiHandler 46 | } 47 | 48 | func NewAlert() *Alert { 49 | id, _ := uuid.NewV4() 50 | 51 | return &Alert{ 52 | UUID: id.String(), 53 | CreateTime: time.Now().UTC(), 54 | } 55 | } 56 | 57 | func (a *AlertResourceHandler) New() ApiResource { 58 | id, _ := uuid.NewV4() 59 | 60 | return &Alert{ 61 | UUID: id.String(), 62 | } 63 | } 64 | 65 | func (a *AlertResourceHandler) Name() string { 66 | return "alert" 67 | } 68 | 69 | func (a *Alert) ID() string { 70 | return a.UUID 71 | } 72 | 73 | func (a *Alert) SetID(i string) { 74 | a.UUID = i 75 | } 76 | -------------------------------------------------------------------------------- /api/config.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | 7 | auth "github.com/abbot/go-http-auth" 8 | "github.com/gorilla/mux" 9 | 10 | "github.com/skydive-project/skydive/config" 11 | shttp "github.com/skydive-project/skydive/http" 12 | "github.com/spf13/viper" 13 | ) 14 | 15 | type ConfigApi struct { 16 | cfg *viper.Viper 17 | } 18 | 19 | func (c *ConfigApi) configGet(w http.ResponseWriter, r *auth.AuthenticatedRequest) { 20 | w.Header().Set("Content-Type", "application/json; charset=UTF-8") 21 | 22 | vars := mux.Vars(&r.Request) 23 | // lookup into ConfigApi 24 | Value := c.cfg.GetString(vars["key"]) 25 | w.WriteHeader(http.StatusOK) 26 | if 
err := json.NewEncoder(w).Encode(Value); err != nil { 27 | panic(err) 28 | } 29 | } 30 | 31 | func (c *ConfigApi) registerEndpoints(r *shttp.Server) { 32 | routes := []shttp.Route{ 33 | { 34 | Name: "ConfigGet", 35 | Method: "GET", 36 | Path: "/api/config/{key}", 37 | HandlerFunc: c.configGet, 38 | }, 39 | } 40 | 41 | r.RegisterRoutes(routes) 42 | } 43 | 44 | func RegisterConfigApi(r *shttp.Server) { 45 | c := &ConfigApi{ 46 | cfg: config.GetConfig(), 47 | } 48 | 49 | c.registerEndpoints(r) 50 | } 51 | -------------------------------------------------------------------------------- /api/pcap.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package api 24 | 25 | import ( 26 | "net/http" 27 | 28 | "github.com/abbot/go-http-auth" 29 | "github.com/skydive-project/skydive/flow" 30 | shttp "github.com/skydive-project/skydive/http" 31 | ) 32 | 33 | type PcapApi struct { 34 | packetsChan chan *flow.FlowPackets 35 | } 36 | 37 | func (p *PcapApi) injectPcap(w http.ResponseWriter, r *auth.AuthenticatedRequest) { 38 | writer, err := flow.NewPcapWriter(r.Body, p.packetsChan, false) 39 | if err != nil { 40 | writeError(w, http.StatusBadRequest, err) 41 | return 42 | } 43 | 44 | writer.Start() 45 | writer.Wait() 46 | 47 | w.Header().Set("Content-Type", "application/json; charset=UTF-8") 48 | w.WriteHeader(http.StatusOK) 49 | } 50 | 51 | func (p *PcapApi) registerEndpoints(r *shttp.Server) { 52 | routes := []shttp.Route{ 53 | { 54 | Name: "PCAP", 55 | Method: "POST", 56 | Path: "/api/pcap", 57 | HandlerFunc: p.injectPcap, 58 | }, 59 | } 60 | 61 | r.RegisterRoutes(routes) 62 | } 63 | 64 | func RegisterPcapApi(r *shttp.Server, packetsChan chan *flow.FlowPackets) { 65 | p := &PcapApi{packetsChan: packetsChan} 66 | 67 | p.registerEndpoints(r) 68 | } 69 | -------------------------------------------------------------------------------- /cmd/agent.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package cmd 24 | 25 | import ( 26 | "os" 27 | "os/signal" 28 | "syscall" 29 | 30 | "github.com/skydive-project/skydive/agent" 31 | "github.com/skydive-project/skydive/config" 32 | "github.com/skydive-project/skydive/logging" 33 | 34 | "github.com/spf13/cobra" 35 | ) 36 | 37 | var Agent = &cobra.Command{ 38 | Use: "agent", 39 | Short: "Skydive agent", 40 | Long: "Skydive agent", 41 | SilenceUsage: true, 42 | Run: func(cmd *cobra.Command, args []string) { 43 | logging.SetLoggingID("agent") 44 | logging.GetLogger().Notice("Skydive Agent starting...") 45 | agent := agent.NewAgent() 46 | agent.Start() 47 | 48 | logging.GetLogger().Notice("Skydive Agent started") 49 | ch := make(chan os.Signal) 50 | signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) 51 | <-ch 52 | 53 | agent.Stop() 54 | 55 | logging.GetLogger().Notice("Skydive Agent stopped.") 56 | }, 57 | } 58 | 59 | func init() { 60 | host, err := os.Hostname() 61 | if err != nil { 62 | panic(err) 63 | } 64 | 65 | Agent.Flags().String("host-id", host, "ID used to reference the agent, defaults to hostname") 66 | config.GetConfig().BindPFlag("host_id", Agent.Flags().Lookup("host-id")) 67 | 68 | Agent.Flags().String("listen", "127.0.0.1:8081", "address and port for the agent API") 69 | config.GetConfig().BindPFlag("agent.listen", Agent.Flags().Lookup("listen")) 70 | 71 | Agent.Flags().String("ovsdb", "unix:///var/run/openvswitch/db.sock", "ovsdb connection") 72 | config.GetConfig().BindPFlag("ovs.ovsdb", Agent.Flags().Lookup("ovsdb")) 73 | } 74 | 
-------------------------------------------------------------------------------- /cmd/client.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package cmd 24 | 25 | import ( 26 | "os" 27 | 28 | "github.com/skydive-project/skydive/cmd/client" 29 | "github.com/skydive-project/skydive/config" 30 | "github.com/spf13/cobra" 31 | ) 32 | 33 | var analyzerAddr string 34 | 35 | var Client = &cobra.Command{ 36 | Use: "client", 37 | Short: "Skydive client", 38 | Long: "Skydive client", 39 | SilenceUsage: true, 40 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 41 | cmd.Root().PersistentPreRun(cmd.Root(), args) 42 | if analyzerAddr != "" { 43 | config.GetConfig().Set("analyzers", analyzerAddr) 44 | } else { 45 | config.GetConfig().SetDefault("analyzers", "localhost:8082") 46 | } 47 | }, 48 | } 49 | 50 | func init() { 51 | Client.PersistentFlags().StringVarP(&client.AuthenticationOpts.Username, "username", "", os.Getenv("SKYDIVE_USERNAME"), "username auth parameter") 52 | Client.PersistentFlags().StringVarP(&client.AuthenticationOpts.Password, "password", "", os.Getenv("SKYDIVE_PASSWORD"), "password auth parameter") 53 | Client.PersistentFlags().StringVarP(&analyzerAddr, "analyzer", "", os.Getenv("SKYDIVE_ANALYZER"), "analyzer address") 54 | 55 | Client.AddCommand(client.AlertCmd) 56 | Client.AddCommand(client.CaptureCmd) 57 | Client.AddCommand(client.PacketInjectorCmd) 58 | Client.AddCommand(client.PcapCmd) 59 | Client.AddCommand(client.ShellCmd) 60 | Client.AddCommand(client.TopologyCmd) 61 | } 62 | -------------------------------------------------------------------------------- /cmd/client/common.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package client 24 | 25 | import ( 26 | "encoding/json" 27 | "fmt" 28 | "os" 29 | 30 | shttp "github.com/skydive-project/skydive/http" 31 | "github.com/skydive-project/skydive/logging" 32 | "github.com/spf13/cobra" 33 | ) 34 | 35 | var ( 36 | AuthenticationOpts shttp.AuthenticationOpts 37 | ) 38 | 39 | func printJSON(obj interface{}) { 40 | s, err := json.MarshalIndent(obj, "", " ") 41 | if err != nil { 42 | logging.GetLogger().Errorf(err.Error()) 43 | os.Exit(1) 44 | } 45 | fmt.Println(string(s)) 46 | } 47 | 48 | func setFromFlag(cmd *cobra.Command, flag string, value *string) { 49 | if flag := cmd.LocalFlags().Lookup(flag); flag.Changed { 50 | *value = flag.Value.String() 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /cmd/client/packet_injector.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package client 24 | 25 | import ( 26 | "fmt" 27 | "os" 28 | 29 | "github.com/skydive-project/skydive/api" 30 | "github.com/skydive-project/skydive/logging" 31 | "github.com/skydive-project/skydive/validator" 32 | 33 | "github.com/spf13/cobra" 34 | ) 35 | 36 | var ( 37 | srcNode string 38 | dstNode string 39 | packetType string 40 | payload string 41 | count int 42 | ) 43 | 44 | var PacketInjectorCmd = &cobra.Command{ 45 | Use: "inject-packet", 46 | Short: "inject packets", 47 | Long: "inject packets", 48 | SilenceUsage: false, 49 | Run: func(cmd *cobra.Command, args []string) { 50 | client, err := api.NewCrudClientFromConfig(&AuthenticationOpts) 51 | if err != nil { 52 | logging.GetLogger().Criticalf(err.Error()) 53 | } 54 | 55 | packet := &api.PacketParamsReq{} 56 | packet.Src = srcNode 57 | packet.Dst = dstNode 58 | packet.Type = packetType 59 | packet.Payload = payload 60 | packet.Count = count 61 | if errs := validator.Validate(packet); errs != nil { 62 | fmt.Println("Error: ", errs) 63 | cmd.Usage() 64 | os.Exit(1) 65 | } 66 | if err := client.Create("injectpacket", &packet); err != nil { 67 | logging.GetLogger().Errorf(err.Error()) 68 | os.Exit(1) 69 | } 70 | }, 71 | } 72 | 73 | func addInjectPacketFlags(cmd *cobra.Command) { 74 | cmd.Flags().StringVarP(&srcNode, "src", "", "", "source node gremlin expression") 75 | cmd.Flags().StringVarP(&dstNode, "dst", "", "", "destination node gremlin expression") 76 | cmd.Flags().StringVarP(&packetType, "type", "", "icmp", "packet type: icmp") 77 | 
cmd.Flags().StringVarP(&payload, "payload", "", "", "payload") 78 | cmd.Flags().IntVarP(&count, "count", "", 1, "number of packets to be generated") 79 | } 80 | 81 | func init() { 82 | addInjectPacketFlags(PacketInjectorCmd) 83 | } 84 | -------------------------------------------------------------------------------- /cmd/client/pcap.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package client 24 | 25 | import ( 26 | "fmt" 27 | "io/ioutil" 28 | "net/http" 29 | "os" 30 | 31 | "github.com/skydive-project/skydive/api" 32 | "github.com/skydive-project/skydive/logging" 33 | 34 | "github.com/spf13/cobra" 35 | ) 36 | 37 | var ( 38 | pcapTrace string 39 | ) 40 | 41 | var PcapCmd = &cobra.Command{ 42 | Use: "pcap", 43 | Short: "Import flows from PCAP file", 44 | Long: "Import flows from PCAP file", 45 | PreRun: func(cmd *cobra.Command, args []string) { 46 | if pcapTrace == "" { 47 | logging.GetLogger().Error("You need to specify a PCAP file") 48 | cmd.Usage() 49 | os.Exit(1) 50 | } 51 | }, 52 | Run: func(cmd *cobra.Command, args []string) { 53 | client, err := api.NewCrudClientFromConfig(&AuthenticationOpts) 54 | if err != nil { 55 | logging.GetLogger().Fatal(err) 56 | } 57 | 58 | file, err := os.Open(pcapTrace) 59 | if err != nil { 60 | logging.GetLogger().Fatal(err) 61 | } 62 | defer file.Close() 63 | 64 | resp, err := client.Request("POST", "api/pcap", file) 65 | if err != nil { 66 | logging.GetLogger().Fatal(err) 67 | } 68 | 69 | if resp.StatusCode == http.StatusOK { 70 | fmt.Printf("%s was successfully imported\n", pcapTrace) 71 | } else { 72 | content, _ := ioutil.ReadAll(resp.Body) 73 | logging.GetLogger().Errorf("Failed to import %s: %s", pcapTrace, string(content)) 74 | } 75 | }, 76 | } 77 | 78 | func init() { 79 | PcapCmd.Flags().StringVarP(&pcapTrace, "trace", "t", "", "PCAP trace file to read") 80 | } 81 | -------------------------------------------------------------------------------- /cmd/client/terminal_unix.go: -------------------------------------------------------------------------------- 1 | // +build !windows 2 | 3 | /* 4 | * Copyright (C) 2016 Red Hat, Inc. 5 | * 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. 
// ANSI escape sequences used to manipulate the terminal.
const (
	ansiCursorUp    = "\x1b[1A" // CUU: move the cursor one line up
	ansiEraseInLine = "\x1b[0K" // EL: erase from the cursor to end of line
)

// cursorUp moves the terminal cursor one line up.
func cursorUp() {
	fmt.Print(ansiCursorUp)
}

// eraseInLine clears the current line from the cursor position to its end.
func eraseInLine() {
	fmt.Print(ansiEraseInLine)
}
20 | * 21 | */ 22 | 23 | package client 24 | 25 | import ( 26 | "os" 27 | 28 | "github.com/spf13/cobra" 29 | 30 | "github.com/skydive-project/skydive/logging" 31 | ) 32 | 33 | var gremlinQuery string 34 | 35 | var TopologyCmd = &cobra.Command{ 36 | Use: "topology", 37 | Short: "Request on topology", 38 | Long: "Request on topology", 39 | SilenceUsage: false, 40 | } 41 | 42 | var TopologyRequest = &cobra.Command{ 43 | Use: "query", 44 | Short: "query topology", 45 | Long: "query topology", 46 | Run: func(cmd *cobra.Command, args []string) { 47 | var value interface{} 48 | 49 | queryHelper := NewGremlinQueryHelper(&AuthenticationOpts) 50 | if err := queryHelper.Query(gremlinQuery, &value); err != nil { 51 | logging.GetLogger().Errorf(err.Error()) 52 | os.Exit(1) 53 | } 54 | 55 | printJSON(value) 56 | }, 57 | } 58 | 59 | func addTopologyFlags(cmd *cobra.Command) { 60 | cmd.Flags().StringVarP(&gremlinQuery, "gremlin", "", "", "Gremlin Query") 61 | } 62 | 63 | func init() { 64 | TopologyCmd.AddCommand(TopologyRequest) 65 | TopologyRequest.Flags().StringVarP(&gremlinQuery, "gremlin", "", "", "Gremlin Query") 66 | } 67 | -------------------------------------------------------------------------------- /cmd/cmd.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package cmd 24 | 25 | import ( 26 | "fmt" 27 | "os" 28 | 29 | "github.com/skydive-project/skydive/config" 30 | "github.com/spf13/cobra" 31 | ) 32 | 33 | var ( 34 | CfgPath string 35 | CfgBackend string 36 | ) 37 | 38 | func LoadConfiguration() { 39 | if CfgPath != "" { 40 | if err := config.InitConfig(CfgBackend, CfgPath); err != nil { 41 | fmt.Fprintf(os.Stderr, "%v\n", err) 42 | os.Exit(1) 43 | } 44 | } 45 | } 46 | 47 | var RootCmd = &cobra.Command{ 48 | Use: "skydive [sub]", 49 | Short: "Skydive", 50 | SilenceUsage: true, 51 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 52 | LoadConfiguration() 53 | }, 54 | } 55 | 56 | func init() { 57 | RootCmd.PersistentFlags().StringVarP(&CfgPath, "conf", "c", "", "location of Skydive agent config file") 58 | RootCmd.PersistentFlags().StringVarP(&CfgBackend, "config-backend", "b", "file", "configuration backend (defaults to file)") 59 | 60 | RootCmd.AddCommand(VersionCmd) 61 | RootCmd.AddCommand(Agent) 62 | RootCmd.AddCommand(Analyzer) 63 | RootCmd.AddCommand(Client) 64 | RootCmd.AddCommand(AllInOne) 65 | } 66 | -------------------------------------------------------------------------------- /cmd/version.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. 
See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package cmd 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/version" 27 | "github.com/spf13/cobra" 28 | ) 29 | 30 | var VersionCmd = &cobra.Command{ 31 | Use: "version", 32 | Short: "Print the version number of Skydive", 33 | Run: func(cmd *cobra.Command, args []string) { 34 | version.PrintVersion() 35 | }, 36 | } 37 | -------------------------------------------------------------------------------- /contrib/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | COPY skydive /usr/bin/skydive 3 | COPY skydive.yml /etc/skydive.yml 4 | ENTRYPOINT ["/usr/bin/skydive", "--conf", "/etc/skydive.yml"] 5 | -------------------------------------------------------------------------------- /contrib/docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | elasticsearch: 4 | image: elasticsearch:2 5 | command: elasticsearch -Des.network.bind_host=0.0.0.0 6 | ports: 7 | - "9200:9200" 8 | - "9300:9300" 9 | 10 | skydive-analyzer: 11 | image: skydive/skydive 12 | ports: 13 | - "2379:2379" 14 | - "8082:8082" 15 | - "8082:8082/udp" 16 | links: 17 | - elasticsearch 18 | 
command: analyzer --listen=0.0.0.0:8082 19 | environment: 20 | - SKYDIVE_STORAGE_ELASTICSEARCH_HOST=elasticsearch:9200 21 | - SKYDIVE_ANALYZER_STORAGE=elasticsearch 22 | - SKYDIVE_GRAPH_BACKEND=elasticsearch 23 | 24 | skydive-agent: 25 | image: skydive/skydive 26 | depends_on: 27 | - skydive-analyzer 28 | ports: 29 | - "8081:8081" 30 | network_mode: "host" 31 | pid: "host" 32 | command: agent --listen=0.0.0.0:8081 33 | privileged: true 34 | volumes: 35 | - /var/run/docker.sock:/var/run/docker.sock 36 | - /var/run/netns:/host/run:shared 37 | #- /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock 38 | environment: 39 | - SKYDIVE_NETNS_RUN_PATH=/host/run 40 | - SKYDIVE_ANALYZERS=127.0.0.1:8082 41 | -------------------------------------------------------------------------------- /contrib/docker/skydive.yml: -------------------------------------------------------------------------------- 1 | agent: 2 | topology: 3 | probes: 4 | - netlink 5 | - netns 6 | - ovsdb 7 | - docker 8 | flow: 9 | probes: 10 | - ovssflow 11 | - gopacket 12 | 13 | analyzer: 14 | listen: 0.0.0.0:8082 15 | -------------------------------------------------------------------------------- /contrib/docker/start-skydive.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ELASTICSEARCH_PORT="${ELASTICSEARCH_PORT:-127.0.0.1:9200}" 4 | SKYDIVE_ANALYZER_PORT="${ANALYZER_PORT:-127.0.0.1:8082}" 5 | SKYDIVE_ANALYZER_PORT_2379_ADDR="${ANALYZER_PORT_2379_ADDR:-127.0.0.1:2379}" 6 | OVSDB="${OVSDB:-unix:///var/run/openvswitch/db.sock}" 7 | SKYDIVE_NETNS_RUN_PATH="${SKYDIVE_NETNS_RUN_PATH:-/host/run}" 8 | SKYDIVE_LOG_LEVEL="${SKYDIVE_LOG_LEVEL:-INFO}" 9 | FLOWTABLE_UPDATE="${FLOWTABLE_UPDATE:-10}" 10 | FLOWTABLE_EXPIRE="${FLOWTABLE_EXPIRE:-300}" 11 | 12 | if [ ! -e /etc/skydive.yml ] 13 | then 14 | cat > /etc/skydive.yml <&2 26 | usage 27 | exit 1 28 | ;; 29 | esac 30 | done 31 | 32 | define="" 33 | version=$(git rev-parse --verify $from) 34 | if [ $? 
-ne 0 ]; then 35 | echo "commit revision $from didn't exist" 36 | exit 1 37 | fi 38 | tagname=$(grep $version <(git show-ref --tags)) 39 | if [ -n "$tagname" ]; then 40 | version=$(echo $tagname | awk -F '/' '{print $NF}' | tr -d [a-z]) 41 | define="tagversion $version" 42 | else 43 | define="commit $version" 44 | fi 45 | 46 | set -e 47 | 48 | gitdir=$(cd "$(dirname "$0")/../../.."; pwd) 49 | rpmbuilddir=$gitdir/rpmbuild 50 | 51 | mkdir -p $rpmbuilddir/SOURCES 52 | mkdir -p $rpmbuilddir/SPECS 53 | make -C $gitdir dist DESTDIR=$rpmbuilddir/SOURCES 54 | $(dirname "$0")/specfile-update-bundles $gitdir/vendor/vendor.json $gitdir/contrib/packaging/rpm/skydive.spec > $rpmbuilddir/SPECS/skydive.spec 55 | rpmbuild --nodeps $build_opts --undefine dist --define "$define" --define "_topdir $rpmbuilddir" $rpmbuilddir/SPECS/skydive.spec 56 | -------------------------------------------------------------------------------- /contrib/packaging/rpm/skydive-agent.sysconfig: -------------------------------------------------------------------------------- 1 | SKYDIVE_AGENT_OPTIONS= 2 | -------------------------------------------------------------------------------- /contrib/packaging/rpm/skydive-analyzer.sysconfig: -------------------------------------------------------------------------------- 1 | SKYDIVE_ANALYZER_OPTIONS= 2 | -------------------------------------------------------------------------------- /contrib/packaging/rpm/specfile-update-bundles: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import json 4 | import sys 5 | 6 | BUNDLE_MARKER = "### AUTO-BUNDLED-GEN-ENTRY-POINT" 7 | 8 | def usage(): 9 | print sys.argv[0], " " 10 | 11 | if len(sys.argv) != 3: 12 | usage() 13 | sys.exit(0) 14 | 15 | provides = [] 16 | vendor = json.load(open(sys.argv[1])) 17 | for package in vendor["package"]: 18 | provides.append("Provides: bundled(golang(%s)) = %s" % (package["path"], package["revision"])) 19 | 20 | spec = 
open(sys.argv[2]).read() 21 | spec = spec.replace(BUNDLE_MARKER, "\n".join(provides)) 22 | print spec 23 | -------------------------------------------------------------------------------- /contrib/systemd/skydive-agent.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Skydive agent 3 | After=network-online.target openvswitch.service docker.service 4 | 5 | [Service] 6 | Type=simple 7 | EnvironmentFile=-/etc/sysconfig/skydive-agent 8 | ExecStart=/usr/bin/skydive agent $SKYDIVE_AGENT_OPTIONS --conf /etc/skydive/skydive.yml 9 | Restart=on-failure 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /contrib/systemd/skydive-analyzer.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Skydive analyzer 3 | After=network-online.target elasticsearch.service 4 | 5 | [Service] 6 | Type=simple 7 | EnvironmentFile=-/etc/sysconfig/skydive-analyzer 8 | ExecStart=/usr/bin/skydive analyzer $SKYDIVE_ANALYZER_OPTIONS --conf /etc/skydive/skydive.yml 9 | Restart=on-failure 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /contrib/vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure(2) do |config| 5 | config.vm.define "analyzer" do |analyzer| 6 | analyzer.vm.box = "fedora/25-cloud-base" 7 | analyzer.vm.hostname = "analyzer" 8 | analyzer.vm.network "private_network", ip: "192.168.50.10" 9 | analyzer.vm.synced_folder ".", "/vagrant", :disabled => true 10 | analyzer.vm.provision "common", type: "shell", path: "setup-common.sh" 11 | analyzer.vm.provision "setup", type: "shell", path: "setup-analyzer.sh" 12 | analyzer.vm.provision "start", type: "shell", inline: "sudo 
systemctl start skydive-analyzer.service" 13 | analyzer.vm.provider :libvirt do |domain| 14 | domain.memory = 1024 15 | end 16 | end 17 | 18 | config.vm.define "agent1" do |agent| 19 | agent.vm.box = "fedora/25-cloud-base" 20 | agent.vm.hostname = "agent1" 21 | agent.vm.network "private_network", ip: "192.168.50.20" 22 | agent.vm.synced_folder ".", "/vagrant", :disabled => true 23 | agent.vm.provision "common", type: "shell", path: "setup-common.sh" 24 | agent.vm.provision "setup", type: "shell", path: "setup-agent.sh" 25 | agent.vm.provision "start", type: "shell", inline: "sudo systemctl start skydive-agent.service" 26 | agent.vm.provider :libvirt do |domain| 27 | domain.memory = 1536 28 | end 29 | end 30 | 31 | config.vm.define "agent2" do |agent| 32 | agent.vm.box = "fedora/25-cloud-base" 33 | agent.vm.hostname = "agent2" 34 | agent.vm.network "private_network", ip: "192.168.50.30" 35 | agent.vm.synced_folder ".", "/vagrant", :disabled => true 36 | agent.vm.provision "common", type: "shell", path: "setup-common.sh" 37 | agent.vm.provision "setup", type: "shell", path: "setup-agent.sh" 38 | agent.vm.provision "start", type: "shell", inline: "sudo systemctl start skydive-agent.service" 39 | agent.vm.provider :libvirt do |domain| 40 | domain.memory = 1536 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /contrib/vagrant/setup-agent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | sudo dnf -y install openvswitch docker 4 | sudo systemctl enable openvswitch.service 5 | sudo systemctl enable docker.service 6 | sudo systemctl start openvswitch.service 7 | sudo systemctl start docker.service 8 | 9 | sudo mkdir -p /etc/skydive 10 | sudo tee /etc/skydive/skydive.yml << EOF 11 | analyzers: 12 | - 192.168.50.10:8082 13 | agent: 14 | flow: 15 | probes: 16 | - ovssflow 17 | - gopacket 18 | topology: 19 | probes: 20 | - netlink 21 | - netns 22 | - ovsdb 23 
| etcd: 24 | client_timeout: 100 25 | EOF 26 | sudo curl -o /usr/lib/systemd/system/skydive-agent.service https://raw.githubusercontent.com/skydive-project/skydive/master/contrib/systemd/skydive-agent.service 27 | sudo systemctl daemon-reload 28 | sudo systemctl enable skydive-agent.service 29 | -------------------------------------------------------------------------------- /contrib/vagrant/setup-analyzer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | sudo rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch 4 | cat > /etc/yum.repos.d/elasticsearch.repo <> /etc/elasticsearch/elasticsearch.yml" 14 | sudo systemctl enable elasticsearch.service 15 | sudo systemctl start elasticsearch.service 16 | sudo mkdir -p /etc/skydive 17 | sudo tee /etc/skydive/skydive.yml << EOF 18 | analyzer: 19 | listen: 0.0.0.0:8082 20 | flowtable_expire: 60 21 | flowtable_update: 5 22 | flowtable_agent_ratio: 0.5 23 | storage: elasticsearch 24 | topology: 25 | fabric: 26 | - TOR1[Name=tor1] -> TOR1_PORT1[Name=port1, MTU=1500] 27 | - TOR1[Name=tor1] -> TOR1_PORT2[Name=port2, MTU=1500] 28 | - TOR1_PORT1 -> *[Type=host,Name=agent1]/eth1 29 | - TOR1_PORT2 -> *[Type=host,Name=agent2]/eth1 30 | etcd: 31 | client_timeout: 100 32 | graph: 33 | backend: elasticsearch 34 | elasticsearch: 35 | addr: 127.0.0.1:9200 36 | EOF 37 | sudo curl -o /usr/lib/systemd/system/skydive-analyzer.service https://raw.githubusercontent.com/skydive-project/skydive/master/contrib/systemd/skydive-analyzer.service 38 | sudo systemctl daemon-reload 39 | sudo systemctl enable skydive-analyzer.service 40 | -------------------------------------------------------------------------------- /contrib/vagrant/setup-common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo dnf -y install nfs-utils nfs-utils-lib jq 4 | sudo setenforce 0 5 | sudo sed -i 's/SELINUX=enforcing/SELINUX=disabled/' 
#!/bin/sh
# Generate test coverage statistics for Go packages.
#
# Works around the fact that `go test -coverprofile` currently does not work
# with multiple packages, see https://code.google.com/p/go/issues/detail?id=6909
#
# Usage: script/coverage [--html|--coveralls]
#
#     --html      Additionally create HTML report and open it in browser
#     --coveralls Push coverage statistics to coveralls.io
#

set -e

workdir=".cover"
profile="$workdir/cover.out"
mode=count

generate_cover_data() {
    rm -rf "$workdir"
    mkdir "$workdir"

    # unit tests: one cover profile per package
    PKG=$(go list ./... | grep -v -e '/tests' -e '/vendor')
    for pkg in ${PKG}; do
        coverfile="$workdir/$(echo $pkg | tr / -).cover"
        govendor test -tags "${TAGS} test" -timeout 6m -covermode="$mode" -coverprofile="$coverfile" "$pkg"
    done

    # add functional testing
    PKG=$(go list ./... | grep -v -e '/tests' -e '/vendor' | tr '\n' ',' | sed -e 's/,$//')
    coverfile="$workdir/functional.cover"
    # BUG FIX: the original passed the undefined variable "$f" to
    # -coverprofile, so the functional cover profile was never written;
    # use the coverfile computed on the line above.
    govendor test -tags "${TAGS} test" -v -cover -covermode="$mode" -coverprofile="$coverfile" -coverpkg=${PKG} -timeout 2m -c -o tests/functionals ./tests/
    FUNC_TESTS=$( grep -e 'func Test' tests/*.go | perl -pe 's|.*func (.*?)\(.*|\1|g' | shuf )
    for functest in ${FUNC_TESTS} ; do
        coverfile="../$workdir/$functest.cover"
        cd tests && sudo -E ./functionals -test.v -test.timeout 2m -test.coverprofile="$coverfile" -test.run $functest$ && cd ..
    done

    # merge all profiles together
    echo "mode: $mode" >"$profile"
    grep -h -v "^mode:" "$workdir"/*.cover | grep -v "skydive/statics" | awk '{ stmt[$1] += $2; count[$1] += $3 } END{ for(e in stmt) { print e, stmt[e], count[e] } }' >> "$profile"
}

show_cover_report() {
    go tool cover -${1}="$profile"
}

push_to_coveralls() {
    echo "Pushing coverage statistics to coveralls.io"
    goveralls -coverprofile="$profile"
}

generate_cover_data
show_cover_report func
case "$1" in
"")
    ;;
--html)
    show_cover_report html ;;
--coveralls)
    push_to_coveralls ;;
*)
    echo >&2 "error: invalid option: $1"; exit 1 ;;
esac
SKYDIVE_ANALYZERS=${SERVICE_HOST}:8082 20 | SKYDIVE_AGENT_LISTEN=${HOST_IP}:8081 21 | SKYDIVE_AGENT_ETCD=http://${SERVICE_HOST}:2379 22 | -------------------------------------------------------------------------------- /devstack/local.conf.full: -------------------------------------------------------------------------------- 1 | [[local|localrc]] 2 | HOST_IP=192.168.0.56 3 | 4 | SERVICE_HOST=${HOST_IP} 5 | 6 | MYSQL_HOST=${HOST_IP} 7 | RABBIT_HOST=${HOST_IP} 8 | GLANCE_HOSTPORT=${HOST_IP}:9292 9 | ADMIN_PASSWORD=password 10 | DATABASE_PASSWORD=password 11 | RABBIT_PASSWORD=password 12 | SERVICE_PASSWORD=password 13 | 14 | disable_service n-net 15 | ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 16 | 17 | enable_plugin skydive https://github.com/skydive-project/skydive.git 18 | enable_service skydive-agent skydive-analyzer 19 | 20 | SKYDIVE_ANALYZER_LISTEN=${HOST_IP}:8082 21 | SKYDIVE_ANALYZERS=${HOST_IP}:8082 22 | SKYDIVE_AGENT_LISTEN=${HOST_IP}:8081 23 | SKYDIVE_AGENT_ETCD=http://${HOST_IP}:2379 24 | 25 | # When using multiple nodes in your Devstack, you need to 26 | # specify the nodes and their public interface 27 | # SKYDIVE_PUBLIC_INTERFACES=devstack1/eth0 devstack2/eth1 28 | -------------------------------------------------------------------------------- /devstack/override-defaults: -------------------------------------------------------------------------------- 1 | ELASTICSEARCH_VERSION=2.0 2 | 3 | -------------------------------------------------------------------------------- /devstack/settings: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/devstack/settings -------------------------------------------------------------------------------- /doc/content/api/alerts.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-09-29T11:02:01+02:00 3 | title: Alerts 4 | --- 5 | 6 | 
Skydive allows you to create alerts, based on queries on both topology graph 7 | and flows. 8 | 9 | ## Alert evaluation 10 | An alert can be specified through a [Gremlin](/api/gremlin) query or a 11 | JavaScript expression. The alert will be triggered if it returns: 12 | 13 | * true 14 | * a non empty string 15 | * a number different from zero 16 | * a non empty array 17 | * a non empty map 18 | 19 | Gremlin example: 20 | 21 | ```console 22 | $ skydive client alert create --expression "G.V().Has('Name', 'eth0', 'State', 'DOWN')" 23 | { 24 | "UUID": "185c49ba-341d-41a0-6f96-f3224140b2fa", 25 | "Expression": "G.V().Has('Name', 'eth0', 'State', 'DOWN')", 26 | "CreateTime": "2016-12-29T13:29:05.273620179+01:00" 27 | } 28 | ``` 29 | 30 | JavaScript example: 31 | 32 | ```console 33 | $ skydive client alert create --expression "Gremlin(\"G.Flows().Has('Network.A', '192.168.0.1').Metrics().Sum()\").ABBytes > 1*1024*1024" --trigger "duration:10s" 34 | { 35 | "UUID": "331b5590-c45d-4723-55f5-0087eef899eb", 36 | "Expression": "Gremlin(\"G.Flows().Has('Network.A', '192.168.0.1').Metrics().Sum()\").ABBytes > 1*1024*1024", 37 | "Trigger": "duration:10s", 38 | "CreateTime": "2016-12-29T13:29:05.197612381+01:00" 39 | } 40 | ``` 41 | 42 | ## Fields 43 | * `Name`, the alert name (optional) 44 | * `Description`, a description for the alert (optional) 45 | * `Expression`, a Gremlin query or JavaScript expression 46 | * `Action`, URL to trigger. Can be a [local file](/api/alerts#webhook) or a [WebHook](/api/alerts#script) 47 | * `Trigger`, event that triggers the alert evaluation. Periodic alerts can be 48 | specified with `duration:5s`, for an alert that will be evaluated every 5 seconds. 
49 | 50 | ## Notifications 51 | 52 | When an alert is triggered, all the WebSocket clients will be notified with a 53 | message of type `Alert` with a JSON object with the attributes: 54 | 55 | * `UUID`, ID of the triggered alert 56 | * `Timestamp`, timestamp of trigger 57 | * `ReasonData`, the result of the alert evaluation. If `expression` is a 58 | Gremlin query, it will be the result of the query. If `expression` is a 59 | JavaScript statement, it will be the result of the evaluation of this 60 | statement. 61 | 62 | In addition to the WebSocket message, an alert can trigger different kinds of 63 | actions. 64 | 65 | ### Webhook 66 | A POST request is issued with the JSON message as payload. 67 | 68 | ### Script 69 | A local file (prefixed by file://) to execute a script. It receives the JSON 70 | message through stdin. 71 | -------------------------------------------------------------------------------- /doc/content/api/captures.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2017-01-05T11:44:01+02:00 3 | title: Captures 4 | --- 5 | 6 | 7 | Flow captures can be started from the WebUI or thanks to the [Skydive client](/getting-started/client). 8 | Skydive leverages the [Gremlin language](/api/gremlin/) in order to select nodes on which a 9 | capture will be started. The Gremlin expression is continuously evaluated, which 10 | means that it is possible to define a capture on nodes that do not exist yet. 11 | It is useful when you want to start a capture on all OpenvSwitch bridges whatever the 12 | number of Skydive agents you will start. 13 | 14 | While starting the capture, you can optionally specify the capture name, 15 | capture description and capture type. 16 | 17 | At this time, the following capture types are supported: 18 | 19 | * `ovssflow`, for interfaces managed by OpenvSwitch such as OVS bridges 20 | * `afpacket`, for interfaces such as Linux bridges, veth, devices, ...
21 | * `pcap`, same as `afpacket` 22 | * `pcapsocket`. This capture type allows you to inject traffic from a PCAP file. 23 | See [below](/api/captures#pcap-files) for more information. 24 | 25 | Node types that support captures are : 26 | 27 | * ovsbridge 28 | * veth 29 | * device 30 | * internal 31 | * tun 32 | * bridge 33 | 34 | ### PCAP files 35 | 36 | If the flow probe `pcapsocket` is enabled, you can create captures with the 37 | type `pcapsocket`. Skydive will create a TCP socket where you can copy PCAP 38 | files (using `nc` for instance). Traffic injected into this socket will have 39 | its capture point set to the selected node. The TCP socket address can be 40 | retrieved using the `PCAPSocket` attribute of the node or using the 41 | `PCAPSocket` attribute of the capture. 42 | -------------------------------------------------------------------------------- /doc/content/api/flows.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-09-29T11:02:01+02:00 3 | title: Flows 4 | --- 5 | 6 | The Flow Schema is described in a 7 | [protobuf file](https://github.com/skydive-project/skydive/blob/master/flow/flow.proto). 8 | 9 | A typical Gremlin request on Flows will return a JSON version of the Flow 10 | structure. 
11 | 12 | ```console 13 | $ skydive client topology query --gremlin "G.Flows().Limit(1)" 14 | [ 15 | { 16 | "ANodeTID": "422190f1-bbde-4eb0-4849-1fd1209229fe", 17 | "BNodeTID": "f3f1256b-7097-487c-7a02-38a32e009b3c", 18 | "LastUpdateMetric": { 19 | "ABBytes": 490, 20 | "ABPackets": 5, 21 | "BABytes": 490, 22 | "BAPackets": 5, 23 | "Last": 1477563666, 24 | "Start": 1477563661 25 | }, 26 | "LayersPath": "Ethernet/IPv4/ICMPv4/Payload", 27 | "Link": { 28 | "A": "02:48:4f:c4:40:99", 29 | "B": "e2:d0:f0:61:e7:81", 30 | "Protocol": "ETHERNET" 31 | }, 32 | "Metric": { 33 | "ABBytes": 21658, 34 | "ABPackets": 221, 35 | "BABytes": 21658, 36 | "BAPackets": 221, 37 | "Last": 1477563666, 38 | "Start": 1477563444 39 | }, 40 | "Network": { 41 | "A": "192.168.0.1", 42 | "B": "192.168.0.2", 43 | "Protocol": "IPV4" 44 | }, 45 | "NodeTID": "f3f1256b-7097-487c-7a02-38a32e009b3c", 46 | "TrackingID": "f745fb1f59298a1773e35827adfa42dab4f469f9", 47 | "UUID": "caa24da240cb3b40c84ebb708e2e5dcbe3c54784" 48 | } 49 | ] 50 | ``` 51 | 52 | Below the description of the fields : 53 | 54 | * `UUID`, Unique ID of the flow. The ID is unique per capture point, meaning 55 | that a same flow will get a different ID for a different capture. 56 | * `TrackingID`, ID of the Flow which is the same across all the 57 | captures point. This ID can be used to follow a Flow on each capture points. 58 | * `NodeTID`, TID metadata of the interface node in the topology where the flow was 59 | captured. 60 | * `ANodeTID`, TID metadata of the interface node in the topology where the packet is 61 | coming from. 62 | * `BNodeTID`, TID metadata of the interface node in the topology where the packet is 63 | going to. 64 | * `LayersPath`, All the layers composing the packets. 65 | * `Link`, Link layer of the flow. A, B and Protocol describing the endpoints and 66 | the protocol of this layer. 67 | * `Network`, Network layer of the flow. A, B and Protocol describing the 68 | endpoints and the protocol of this layer. 
69 | * `Transport`, Transport layer of the flow. A, B and Protocol describing the 70 | endpoints and the protocol of this layer. 71 | * `Metric`, Current metrics of the flow. `AB*` stands for metrics from 72 | endpoint `A` to endpoint `B`, and `BA*` for the reverse path. 73 | -------------------------------------------------------------------------------- /doc/content/api/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-04T17:48:22+02:00 3 | title: API 4 | --- 5 | -------------------------------------------------------------------------------- /doc/content/architecture.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-09-29T11:02:01+02:00 3 | title: Architecture 4 | --- 5 | 6 | ![Skydive Architecture](/images/architecture.png) 7 | 8 | ## Graph 9 | 10 | Skydive relies on an event-based graph engine, which means that notifications 11 | are sent for each modification. Graphs expose notifications over WebSocket 12 | connections. Skydive supports multiple graph backends for the Graph. The `memory` 13 | backend will always be used by agents while the backend for analyzers can be 14 | chosen. Each modification is kept in the datastore so that we have a full 15 | history of the graph. This is really useful to troubleshoot even if 16 | interfaces do not exist anymore. 17 | 18 | ## Forwarder 19 | 20 | Forwards graph messages from agents to analyzers so that analyzers can build 21 | an aggregation of all agent graphs. 22 | 23 | ## Topology probes 24 | 25 | Fill the graph with the topology information collected. Multiple probes fill the 26 | graph in parallel. As an example there are probes filling the graph with 27 | network namespaces, netlink or OVSDB information. 28 | 29 | ## Flow table 30 | 31 | Skydive keeps track of packets captured in flow tables. It allows Skydive to 32 | keep metrics for each flow. 
At a given frequency or when the flow expires 33 | (see the config file) flows are forwarded from agents to analyzers and then 34 | to the datastore. 35 | 36 | ## Flow enhancer 37 | 38 | Each time a new flow is received by the analyzer the flow is enhanced with 39 | topology information like where it has been captured, where it originates from, 40 | where the packet is going to. 41 | 42 | ## Flow probes 43 | 44 | Flow probes capture packets and fill agent flow tables. There are different 45 | ways to capture packets like sFlow, afpacket, PCAP, etc. 46 | 47 | ## Gremlin engine 48 | 49 | Skydive uses the Gremlin language as its graph traversal language. The Skydive 50 | Gremlin implementation allows the use of Gremlin for flow traversal purposes. 51 | The Gremlin engine can either retrieve information from the datastore or from 52 | agents depending on whether the request is about something in the past or for live 53 | monitoring/troubleshooting. 54 | 55 | ## Etcd 56 | 57 | Skydive uses Etcd to store API objects like captures. Agents are watching Etcd 58 | so that they can react to API calls. 59 | 60 | ## On-demand probe 61 | 62 | This component watches Etcd and the graph in order to start captures. So when a 63 | new capture is created by the API, the on-demand probe looks for graph nodes 64 | matching the Gremlin expression, and if so, starts capturing traffic. 
65 | -------------------------------------------------------------------------------- /doc/content/contact.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-06T11:03:48+02:00 3 | title: Contact 4 | --- 5 | 6 | ## IRC 7 | * #skydive-project on irc.freenode.net 8 | 9 | ## Mailing list 10 | * https://www.redhat.com/mailman/listinfo/skydive-dev 11 | -------------------------------------------------------------------------------- /doc/content/contributing.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-06T11:02:01+02:00 3 | title: Contributing 4 | --- 5 | 6 | This project accepts contributions. Skydive uses the Gerrit workflow 7 | through Software Factory. 8 | 9 | http://softwarefactory-project.io/r/#/q/project:skydive 10 | 11 | ## Setting up your environment 12 | 13 | ```console 14 | git clone https://softwarefactory-project.io/r/skydive 15 | ``` 16 | 17 | git-review installation : 18 | 19 | ```console 20 | yum install git-review 21 | ``` 22 | 23 | or 24 | 25 | 26 | ```console 27 | apt-get install git-review 28 | ``` 29 | 30 | or to get the latest version 31 | 32 | ```console 33 | sudo pip install git-review 34 | ``` 35 | 36 | ## Starting a Change 37 | 38 | Create a topic branch : 39 | 40 | ```console 41 | git checkout -b TOPIC-BRANCH 42 | ``` 43 | 44 | Submit your change : 45 | 46 | ```console 47 | git review 48 | ``` 49 | 50 | Updating your Change : 51 | 52 | ```console 53 | git commit -a --amend 54 | git review 55 | ``` 56 | 57 | For a more complete documentation about 58 | [how to contribute to a gerrit hosted project](https://gerrit-documentation.storage.googleapis.com/Documentation/2.12/intro-quick.html#_the_life_and_times_of_a_change). 
59 | -------------------------------------------------------------------------------- /doc/content/deployment.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-11-23T01:07:31+00:00 3 | title: Deployment 4 | --- 5 | 6 | ## Configuration file 7 | 8 | Skydive is based on a unique binary and configuration file for the Agent and Analyzer. 9 | Each Agent and Analyzer has its own section. 10 | 11 | A configuration example can be found [here](https://github.com/skydive-project/skydive/blob/master/etc/skydive.yml.default) 12 | 13 | ## Security 14 | 15 | To secure communication between Agent(s) and Analyzer, Skydive relies on TLS communication with strict cross validation. 16 | TLS communication can be enabled by defining X509 certificates in their respective section in the configuration file, like: 17 | 18 | ``` 19 | analyzer: 20 | X509_cert: /etc/ssl/certs/analyzer.domain.com.crt 21 | X509_key: /etc/ssl/certs/analyzer.domain.com.key 22 | 23 | agent: 24 | X509_cert: /etc/ssl/certs/agent.domain.com.crt 25 | X509_key: /etc/ssl/certs/agent.domain.com.key 26 | ``` 27 | 28 | ### Generate the certificates 29 | 30 | ### Certificate Signing Request (CSR) 31 | ``` 32 | openssl genrsa -out analyzer/analyzer.domain.com.key 2048 33 | chmod 400 analyzer/analyzer.domain.com.key 34 | openssl req -new -key analyzer/analyzer.domain.com.key -out analyzer/analyzer.domain.com.csr -subj "/CN=skydive-analyzer" -config skydive-openssl.cnf 35 | ``` 36 | 37 | ### Analyzer (Server certificate CRT) 38 | ``` 39 | yes '' | openssl x509 -req -days 365 -signkey analyzer/analyzer.domain.com.key -in analyzer/analyzer.domain.com.csr -out analyzer/analyzer.domain.com.crt -extfile skydive-openssl.cnf -extensions v3_req 40 | chmod 444 analyzer/analyzer.domain.com.crt 41 | ``` 42 | 43 | ### Agent (Client certificate CRT) 44 | ``` 45 | openssl genrsa -out agent/agent.domain.com.key 2048 46 | chmod 400 agent/agent.domain.com.key 47 | yes '' | openssl 
req -new -key agent/agent.domain.com.key -out agent/agent.domain.com.csr -subj "/CN=skydive-agent" -config skydive-openssl.cnf 48 | openssl x509 -req -days 365 -signkey agent/agent.domain.com.key -in agent/agent.domain.com.csr -out agent/agent.domain.com.crt -extfile skydive-openssl.cnf -extensions v3_req 49 | ``` 50 | 51 | ### skydive-openssl.cnf 52 | ``` 53 | [req] 54 | distinguished_name = req_distinguished_name 55 | req_extensions = v3_req 56 | 57 | [req_distinguished_name] 58 | countryName = Country Name (2 letter code) 59 | countryName_default = FR 60 | stateOrProvinceName = State or Province Name (full name) 61 | stateOrProvinceName_default = Paris 62 | localityName = Locality Name (eg, city) 63 | localityName_default = Paris 64 | organizationalUnitName = Organizational Unit Name (eg, section) 65 | organizationalUnitName_default = Skydive Team 66 | commonName = skydive.domain.com 67 | commonName_max = 64 68 | 69 | [ v3_req ] 70 | # Extensions to add to a certificate request 71 | basicConstraints = CA:TRUE 72 | keyUsage = digitalSignature, keyEncipherment, keyCertSign 73 | extendedKeyUsage = serverAuth,clientAuth 74 | subjectAltName = @alt_names 75 | 76 | [alt_names] 77 | DNS.1 = agent.domain.com 78 | DNS.2 = analyzer.domain.com 79 | DNS.3 = localhost 80 | IP.1 = 192.168.1.1 81 | IP.2 = 192.168.69.14 82 | IP.3 = 127.0.0.1 83 | ``` 84 | -------------------------------------------------------------------------------- /doc/content/getting-started/client.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-06T11:02:01+02:00 3 | title: Skydive client, API & WebUI 4 | --- 5 | 6 | ## Client 7 | 8 | Skydive client can be used to interact with Skydive Analyzer and Agents. 9 | Running it without any command will return all the commands available. 
10 | 11 | ```console 12 | $ skydive client 13 | 14 | Usage: 15 | skydive client [command] 16 | 17 | Available Commands: 18 | alert Manage alerts 19 | capture Manage captures 20 | 21 | Flags: 22 | -h, --help[=false]: help for client 23 | --password="": password auth parameter 24 | --username="": username auth parameter 25 | ``` 26 | 27 | Specifying the subcommand will give the usage of the subcommand. 28 | 29 | ```console 30 | $ skydive client capture 31 | ``` 32 | 33 | If an authentication mechanism is defined in the configuration file the username 34 | and password parameter have to be used for each command. Environment variables 35 | SKYDIVE_USERNAME and SKYDIVE_PASSWORD can be used as default value for the 36 | username/password command line parameters. 37 | 38 | ## WebUI 39 | 40 | To access to the WebUI of agents or analyzer: 41 | 42 | ```console 43 | http://
: 44 | ``` 45 | 46 | ## Topology requests 47 | 48 | Skydive uses the Gremlin traversal language as a topology request language. 49 | Requests on the topology can be done as following : 50 | 51 | ```console 52 | $ skydive client topology query --gremlin "G.V().Has('Name', 'br-int', 'Type' ,'ovsbridge')" 53 | [ 54 | { 55 | "Host": "pc48.home", 56 | "ID": "1e4fc503-312c-4e4f-4bf5-26263ce82e0b", 57 | "Metadata": { 58 | "Name": "br-int", 59 | "Type": "ovsbridge", 60 | "UUID": "c80cf5a7-998b-49ca-b2b2-7a1d050facc8" 61 | } 62 | } 63 | ] 64 | ``` 65 | Refer to the [Gremlin section](/api/gremlin/) for further 66 | explanations about the syntax and the functions available. 67 | 68 | ## Flow captures 69 | 70 | Captures are described in [this section](/api/captures/) 71 | The following command starts a capture on all `docker0` interfaces: 72 | 73 | ```console 74 | $ skydive client capture create --gremlin "G.V().Has('Name', 'docker0')" 75 | 76 | { 77 | "UUID": "76de5697-106a-4f50-7455-47c2fa7a964f", 78 | "GremlinQuery": "G.V().Has('Name', 'docker0')" 79 | } 80 | 81 | ``` 82 | 83 | To delete a capture : 84 | 85 | ```console 86 | $ skydive client capture delete 87 | ``` 88 | 89 | The Flows Gremlin step can be used in order to see the flows captured. See the 90 | [Gremlin section](/getting-started/gremlin/) for further explanations. 91 | 92 | ```console 93 | skydive client topology query --gremlin "G.V().Has('Name', 'docker0').Flows()" 94 | ``` 95 | -------------------------------------------------------------------------------- /doc/content/getting-started/docker.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-06T11:02:01+02:00 3 | title: How to deploy Skydive with Docker 4 | --- 5 | 6 | ## Docker 7 | 8 | A Docker image is available on the [Skydive Docker Hub account](https://hub.docker.com/r/skydive/). 
9 | 10 | To start the analyzer : 11 | ```console 12 | docker run -p 8082:8082 -p 2379:2379 skydive/skydive analyzer 13 | ``` 14 | 15 | To start the agent : 16 | ```console 17 | docker run --privileged --pid=host --net=host -p 8081:8081 -v /var/run/docker.sock:/var/run/docker.sock skydive/skydive agent 18 | ``` 19 | 20 | ## Docker Compose 21 | 22 | [Docker Compose](https://docs.docker.com/compose/) can also be used to automatically start 23 | an Elasticsearch container, a Skydive analyzer container and a Skydive agent container. The service 24 | definition is located in the `contrib/docker` folder of the Skydive sources. 25 | 26 | ```console 27 | docker-compose up 28 | ``` 29 | -------------------------------------------------------------------------------- /doc/content/getting-started/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-04T17:48:22+02:00 3 | title: Getting started 4 | --- 5 | -------------------------------------------------------------------------------- /doc/content/getting-started/install.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-06T11:02:01+02:00 3 | title: Installation 4 | --- 5 | 6 | ## Introduction 7 | 8 | Skydive relies on two main components: 9 | 10 | * skydive agent, has to be started on each node where the topology and flows 11 | informations will be captured 12 | * skydive analyzer, the node collecting data captured by the agents 13 | 14 | ## Dependencies 15 | 16 | * Go >= 1.6 17 | * Elasticsearch >= 2.0 18 | * libpcap 19 | * libxml2 20 | * protoc >= 3.0 21 | 22 | ## Install 23 | 24 | Make sure you have a working Go environment. [See the install instructions] 25 | (http://golang.org/doc/install.html). 
26 | 27 | ```console 28 | $ mkdir -p $GOPATH/src/github.com/skydive-project 29 | $ git clone https://github.com/skydive-project/skydive.git $GOPATH/src/github.com/skydive-project/skydive 30 | $ cd $GOPATH/src/github.com/skydive-project/skydive 31 | $ make install 32 | ``` 33 | 34 | ## Configuration 35 | 36 | For a single node setup, the configuration file is optional. For a multiple 37 | node setup, the analyzer IP/PORT need to be adapted. 38 | 39 | Processes are bound to 127.0.0.1 by default, you can explicitly change binding 40 | address with "listen: 0.0.0.0:port" in the proper configuration sections. 41 | 42 | User can add host metadata to specify an extra host information in 43 | "agent.metadata" configuration section. All the key value pairs given 44 | under this configuration section will be added to host metadata. 45 | 46 | See the full list of configuration parameters in the sample configuration file 47 | [etc/skydive.yml.default](https://github.com/skydive-project/skydive/blob/master/etc/skydive.yml.default). 48 | 49 | ## Start 50 | 51 | ```console 52 | $ skydive agent [--conf etc/skydive.yml] 53 | ``` 54 | ```console 55 | $ skydive analyzer [--conf etc/skydive.yml] 56 | ``` 57 | 58 | ## All-in-one 59 | 60 | The `all-in-one` mode can be used to start an Agent and an Analyzer at once. 61 | 62 | ```console 63 | $ skydive allinone [--conf etc/skydive.yml] 64 | ``` 65 | -------------------------------------------------------------------------------- /doc/content/getting-started/kubernetes.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-14T11:02:01+02:00 3 | title: How to deploy Skydive with kubernetes 4 | --- 5 | 6 | ## Kubernetes deployment 7 | 8 | Skydive provides a Kubernetes 9 | [file](https://github.com/skydive-project/skydive/blob/master/contrib/kubernetes/skydive.yaml) 10 | which can be used to deploy Skydive. 
It will deploy an Elasticsearch, 11 | a Skydive analyzer and Skydive Agent on each Kubernetes nodes. Once you will 12 | have Skydive deployment on top on your Kubernetes cluster you will be able to 13 | monitor, capture, troubleshoot your container networking stack. 14 | 15 | A skydive Analyzer [Kubernetes service](http://kubernetes.io/docs/user-guide/services/) 16 | is created and exposes ports for Elasticsearch and the Analyzer: 17 | 18 | * Elasticsearch: 9200 19 | * Analyzer: 8082 20 | 21 | [Kubernetes DaemonSet](http://kubernetes.io/docs/admin/daemons/) is used for 22 | Agents in order to have one Agent per node. 23 | 24 | ## Creation 25 | 26 | ```console 27 | kubectl create -f skydive.yaml 28 | ``` 29 | 30 | Once you have your environment set up, going to the Analyzer service 31 | should show similar to the following capture. 32 | 33 | ![WebUI Capture](/images/kubernetes-two-nodes.png) 34 | -------------------------------------------------------------------------------- /doc/content/getting-started/vagrant.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-11-28T15:21:44+01:00 3 | title: vagrant 4 | --- 5 | 6 | ## Vagrant deployment 7 | 8 | You can use Vagrant to deploy a Skydive environment with one virtual machine 9 | running both Skydive analyzer and Elasticsearch, and two virtual machines with the 10 | Skydive agent. This `Vagrantfile`, hosted in `contrib/vagrant` of the Git 11 | repository, makes use of the 12 | [libvirt Vagrant provider](https://github.com/vagrant-libvirt/vagrant-libvirt) 13 | and uses Fedora as the box image. 
14 | 15 | ```console 16 | cd contrib/vagrant 17 | vagrant up 18 | ``` 19 | -------------------------------------------------------------------------------- /doc/content/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-05-04T17:48:22+02:00 3 | title: Overview 4 | type: index 5 | --- 6 | 7 | Skydive is an open source real-time network topology and protocols analyzer. 8 | It aims to provide a comprehensive way of understanding what is happening in 9 | the network infrastructure. 10 | 11 | Skydive agents collect topology informations and flows and forward them to a 12 | central agent for further analysis. All the informations are stored in an 13 | Elasticsearch database. 14 | 15 | Skydive is SDN-agnostic but provides SDN drivers in order to enhance the 16 | topology and flows informations. 17 | 18 | ## Topology Probes supported 19 | 20 | Topology probes currently implemented: 21 | 22 | * OVSDB 23 | * NetLINK 24 | * NetNS 25 | * Ethtool 26 | 27 | Topology connectors: 28 | 29 | * Neutron 30 | * Docker 31 | * Opencontrail 32 | 33 | ## Flow Probes supported 34 | 35 | Flow probes currently implemented: 36 | 37 | * sFlow 38 | * AFPacket 39 | * PCAP 40 | * PCAP socket 41 | 42 | For more information, check section [Captures](/api/captures) 43 | -------------------------------------------------------------------------------- /doc/content/use-cases/index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/content/use-cases/index.md -------------------------------------------------------------------------------- /doc/content/use-cases/packet-injector.md: -------------------------------------------------------------------------------- 1 | Skydive provides a Packet injector, which is helpful to verify the successful packet flow between two network devices by injecting a packet from one 
device and capture the same in the other device. 2 | 3 | The packet injector can be used with either the command line or the WebUI. 4 | 5 | ### How to use 6 | 7 | To use the packet injector we need to provide the below parameters, 8 | 9 | * Source node, needs to be expressed in gremlin query format. 10 | * Destination node, needs to be expressed in gremlin query format. 11 | * Type of packet. currently only ICMP is supported. 12 | * Number of packets to be generated, default is 1. 13 | * Payload of the packet. 14 | 15 | ```console 16 | $ skydive client inject-packet [flags] 17 | 18 | Flags: 19 | --count int number of packets to be generated (default 1) 20 | --dst string destination node gremlin expression 21 | --payload string payload 22 | --src string source node gremlin expression 23 | --type string packet type: icmp (default "icmp") 24 | ``` 25 | 26 | ### Example 27 | ```console 28 | $ skydive client inject-packet --src="G.V().Has('TID', 'feae10c1-240e-48e0-4a13-c608ffd157ab')" --dst="G.V().Has('TID', 'feae10c1-240e-48e0-4a13-c608ffd15700')" --type="icmp" --count=15 29 | ``` 30 | -------------------------------------------------------------------------------- /doc/static/images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/architecture.png -------------------------------------------------------------------------------- /doc/static/images/devstack-two-nodes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/devstack-two-nodes.png -------------------------------------------------------------------------------- /doc/static/images/kubernetes-two-nodes.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/kubernetes-two-nodes.png -------------------------------------------------------------------------------- /doc/static/images/skydive-logo-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/skydive-logo-16x16.png -------------------------------------------------------------------------------- /doc/static/images/skydive-logo-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/skydive-logo-32x32.png -------------------------------------------------------------------------------- /doc/static/images/skydive-logo-96x96.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/skydive-logo-96x96.png -------------------------------------------------------------------------------- /doc/static/images/skydive-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/skydive-logo.png -------------------------------------------------------------------------------- /doc/static/images/skydive-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/static/images/skydive-screenshot.png -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/CHANGELOG.md: 
-------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ### 2016-03-22 4 | 5 | #### Changing setup for Google Analytics 6 | 7 | Formerly, the tracking id for Google Analytics was set like below: 8 | 9 | ```toml 10 | [params] 11 | google_analytics = ["UA-XXXXXXXX-X", "auto"] 12 | ``` 13 | 14 | Now the theme uses Hugo's own Google Analytics config option. The variable moved outside the scope of `params` and the setup requires only the tracking id as a string: 15 | 16 | ```toml 17 | googleAnalytics = "UA-XXXXXXXX-X" 18 | ``` 19 | 20 | [Show me the diff](https://github.com/digitalcraftsman/hugo-material-docs/commit/fa10c8eef935932426d46b662a51f29a5e0d48e2) -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 Digitalcraftsman
2 | Copyright (c) 2016 Martin Donath 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to 6 | deal in the Software without restriction, including without limitation the 7 | rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 8 | sell copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in 12 | all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 20 | IN THE SOFTWARE. -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/README.md: -------------------------------------------------------------------------------- 1 | # Material Docs 2 | 3 | A material design theme for [Hugo](https://gohugo.io). 4 | 5 | [![Screenshot](https://raw.githubusercontent.com/digitalcraftsman/hugo-material-docs/master/static/images/screen.png)](https://digitalcraftsman.github.io/hugo-material-docs/) 6 | 7 | ## Quick start 8 | 9 | Install with `git`: 10 | 11 | ```sh 12 | git clone git@github.com:digitalcraftsman/hugo-material-docs.git themes/hugo-material-docs 13 | ``` 14 | 15 | Next, take a look in the `exampleSite` folder at. This directory contains an example config file and the content for the demo. It serves as an example setup for your documentation. 
16 | 17 | Copy at least the `config.toml` in the root directory of your website. Overwrite the existing config file if necessary. 18 | 19 | Hugo includes a development server, so you can view your changes as you go - 20 | very handy. Spin it up with the following command: 21 | 22 | ``` sh 23 | hugo server 24 | ``` 25 | 26 | Now you can go to [localhost:1313](http://localhost:1313) and the Material 27 | theme should be visible. For detailed installation instructions visit the [demo](http://themes.gohugo.io/theme/material-docs/). 28 | 29 | Noteworthy changes of this theme are listed in the [changelog](https://github.com/digitalcraftsman/hugo-material-docs/blob/master/CHANGELOG.md). 30 | 31 | ## Acknowledgements 32 | 33 | A big thank you to [Martin Donath](https://github.com/squidfunk). He created the original [Material theme](https://github.com/squidfunk/mkdocs-material) for Hugo's companion [MkDocs](http://www.mkdocs.org/). This port wouldn't be possible without him. 34 | 35 | Furthermore, thanks to [Steve Francia](https://gihub.com/spf13) for creating Hugo and the [awesome community](https://github.com/spf13/hugo/graphs/contributors) around the project. 36 | 37 | ## License 38 | 39 | The theme is released under the MIT license. Read the [license](https://github.com/digitalcraftsman/hugo-material-docs/blob/master/LICENSE.md) for more information. 
40 | 41 | -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/archetypes/default.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/exampleSite/config.toml: -------------------------------------------------------------------------------- 1 | baseurl = "http://replace-this-with-your-hugo-site.com/" 2 | languageCode = "en-us" 3 | title = "Material Docs" 4 | theme = "hugo-material-docs" 5 | metadataformat = "yaml" 6 | canonifyurls = true 7 | # Enable Google Analytics by entering your tracking id 8 | googleAnalytics = "" 9 | 10 | [params] 11 | # General information 12 | author = "Digitalcraftsman" 13 | description = "A material design theme for documentations." 14 | copyright = "Released under the MIT license" 15 | 16 | # Repository 17 | provider = "GitHub" 18 | repo_url = "https://github.com/digitalcraftsman/hugo-material-docs" 19 | 20 | version = "1.0.0" 21 | logo = "images/logo.png" 22 | favicon = "" 23 | 24 | permalink = "#" 25 | 26 | # Custom assets 27 | custom_css = [] 28 | custom_js = [] 29 | 30 | # Syntax highlighting theme 31 | highlight_css = "" 32 | 33 | [params.palette] 34 | primary = "red" 35 | accent = "light green" 36 | 37 | [params.font] 38 | text = "Ubuntu" 39 | code = "Ubuntu Mono" 40 | 41 | 42 | [social] 43 | twitter = "" 44 | github = "digitalcraftsman" 45 | 46 | 47 | [[menu.main]] 48 | name = "Material" 49 | url = "/" 50 | weight = 0 51 | 52 | [[menu.main]] 53 | name = "Getting started" 54 | url = "getting-started/" 55 | weight = 10 56 | 57 | [[menu.main]] 58 | name = "Adding content" 59 | url = "adding-content/" 60 | weight = 20 61 | 62 | [[menu.main]] 63 | name = "Roadmap" 64 | url = "roadmap/" 65 | weight = 30 66 | 67 | [[menu.main]] 68 | name = "License" 69 | url = "license/" 70 | weight = 40 71 | 72 | 73 | [blackfriday] 74 | 
smartypants = true 75 | fractions = true 76 | smartDashes = true 77 | plainIDAnchors = true -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/exampleSite/content/adding-content/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-03-09T19:56:50+01:00 3 | title: Adding content 4 | --- 5 | 6 | ## Hello world 7 | 8 | Let's create our first content file for your documentation. Open a terminal and add the following command for each new file you want to add. Replace `` with a general term that describes your document in detail. 9 | 10 | ```sh 11 | hugo new /filename.md 12 | ``` 13 | 14 | Visitors of your website will find the final document under `www.example.com//filename/`. 15 | 16 | Since it's possible to have multiple content files in the same section I recommend to create at least one `index.md` file per section. This ensures that users will find an index page under `www.example.com/`. 17 | 18 | ## Homepage 19 | 20 | To add content to the homepage you need to add a small indicator to the frontmatter of the content file: 21 | 22 | ```toml 23 | type: index 24 | ``` 25 | 26 | Otherwise the theme will not be able to find the corresponding content file. 27 | 28 | ## Table of contents 29 | 30 | You maybe noticed that the menu on the left contains a small table of contents of the current page. All `

` tags (`## Headline` in Markdown) will be added automatically. 31 | 32 | ## Admonitions 33 | 34 | Admonition is a handy feature that adds block-styled side content to your documentation, for example hints, notes or warnings. It can be enabled by using the corresponding [shortcodes](http://gohugo.io/extras/shortcodes/) inside your content: 35 | 36 | ```go 37 | {{}} 38 | Nothing to see here, move along. 39 | {{}} 40 | ``` 41 | 42 | This will print the following block: 43 | 44 | {{< note title="Note" >}} 45 | Nothing to see here, move along. 46 | {{< /note >}} 47 | 48 | The shortcode adds a neutral color for the note class and a red color for the warning class. You can also add a custom title: 49 | 50 | ```go 51 | {{}} 52 | Nothing to see here, move along. 53 | {{}} 54 | ``` 55 | 56 | This will print the following block: 57 | 58 | {{< warning title="Don't try this at home" >}} 59 | Nothing to see here, move along. 60 | {{< /warning >}} -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/exampleSite/content/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | date: 2016-03-08T21:07:13+01:00 3 | title: Material for Hugo 4 | type: index 5 | --- 6 | 7 | ## Beautiful documentation 8 | 9 | Material is a theme for [Hugo](https://gohugo.io), a fast and flexible static site generator. It is built using Google's [material design](https://www.google.com/design/spec/material-design/introduction.html) 10 | guidelines, fully responsive, optimized for touch and pointer devices as well 11 | as all sorts of screen sizes. 12 | 13 | ![Material Screenshot](/images/screen.png) 14 | 15 | Material is very lightweight – it is built from scratch using Javascript and 16 | CSS that weighs less than 30kb (minified, gzipped and excluding Google Fonts 17 | and Analytics). Yet, it is highly customizable and degrades gracefully in older 18 | browsers. 
19 | 20 | ## Quick start 21 | 22 | Install with `git`: 23 | 24 | ```sh 25 | git clone git@github.com:digitalcraftsman/hugo-material-docs.git themes/hugo-material-docs 26 | ``` 27 | 28 | ## Features 29 | 30 | - Beautiful, readable and very user-friendly design based on Google's material 31 | design guidelines, packed in a full responsive template with a well-defined 32 | and [easily customizable color palette]({{< relref "getting-started/index.md#changing-the-color-palette" >}}), great typography, as well as a 33 | beautiful search interface and footer. 34 | 35 | - Well-tested and optimized Javascript and CSS including a cross-browser 36 | fixed/sticky header, a drawer that even works without Javascript using 37 | the [checkbox hack](http://tutorialzine.com/2015/08/quick-tip-css-only-dropdowns-with-the-checkbox-hack/) with fallbacks, responsive tables that scroll when 38 | the screen is too small and well-defined print styles. 39 | 40 | - Extra configuration options like a [project logo]({{< relref "getting-started/index.md#adding-a-logo" >}}), links to the authors 41 | [GitHub and Twitter accounts]({{< relref "getting-started/index.md#adding-a-github-and-twitter-account" >}}) and display of the amount of stars the 42 | project has on GitHub. 43 | 44 | - Web application capability on iOS – when the page is saved to the homescreen, 45 | it behaves and looks like a native application. 46 | 47 | See the [getting started guide]({{< relref "getting-started/index.md" >}}) for instructions how to get 48 | it up and running. 49 | 50 | ## Acknowledgements 51 | 52 | Last but not least a big thank you to [Martin Donath](https://github.com/squidfunk). He created the original [Material theme](https://github.com/squidfunk/mkdocs-material) for Hugo's companion [MkDocs](http://www.mkdocs.org/). This port wouldn't be possible without him. 
Furthermore, thanks to [Steve Francia](https://github.com/spf13) for creating Hugo and the [awesome community](https://github.com/spf13/hugo/graphs/contributors) around the project.
--------------------------------------------------------------------------------
/doc/themes/hugo-material-docs/exampleSite/content/license/index.md:
--------------------------------------------------------------------------------
---
date: 2016-03-09T20:10:46+01:00
title: License
---

Copyright (c) 2016 Digitalcraftsman
Copyright (c) 2016 Martin Donath

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

--------------------------------------------------------------------------------
/doc/themes/hugo-material-docs/exampleSite/content/roadmap/index.md:
--------------------------------------------------------------------------------
---
date: 2016-03-09T20:08:11+01:00
title: Roadmap
---

Quo vadis? The port of the original [Material theme](https://github.com/squidfunk/mkdocs-material) has replicated nearly all of its features. A few are still missing but I have good news: the Hugo community is actively working on these issues. Maybe with the next release of Hugo we can abandon this list. Stay tuned.

## Localization

Currently, it is possible to collect all strings in a single place for easy customization. However, this only enables you to define all strings in a single language.
This approach is quite limiting in terms of localization support. Therefore, I decided to wait for a native integration. This way we can avoid a second setup of all strings in your website.

Keep an eye on [#1734](https://github.com/spf13/hugo/issues/1734).

## Search

Besides third-party services, some hacky workarounds and Grunt-/Gulp-based scripts that only require unnecessary dependencies, future versions of Hugo will support the generation of a content index as a core feature.

This approach plays well with this theme since MkDocs does the same.

Keep an eye on [#1853](https://github.com/spf13/hugo/pull/1853).

## Contributing

Did you find a bug or would you like to suggest a new feature? I'm open for feedback. Please open a new [issue](https://github.com/digitalcraftsman/hugo-material-docs/issues) and let me know.

You're also welcome to contribute with [pull requests](https://github.com/digitalcraftsman/hugo-material-docs/pulls).
27 | -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/exampleSite/static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/exampleSite/static/.gitkeep -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/images/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/images/screenshot.png -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/images/tn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/images/tn.png -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/404.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/layouts/404.html -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/_default/list.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/layouts/_default/list.html -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/_default/single.html: 
-------------------------------------------------------------------------------- 1 | {{ partial "head" . }} 2 | 3 | {{ if (eq (trim .Site.Params.provider " " | lower) "github") | and (isset .Site.Params "repo_url") }} 4 | {{ $repo_id := replace .Site.Params.repo_url "https://github.com/" ""}} 5 | {{ .Scratch.Set "repo_id" $repo_id }} 6 | {{ end }} 7 | 8 |
9 |
10 |
11 | 12 | 13 | 14 | 15 | 16 |
17 | {{ partial "header" . }} 18 |
19 | 20 |
21 |
22 | {{ partial "drawer" . }} 23 |
24 | 25 |
26 |
27 |

{{ .Title }} {{ if .IsDraft }} (Draft){{ end }}

28 | 29 | {{ .Content }} 30 | 31 | 42 | 43 |
44 | {{ partial "footer" . }} 45 |
46 |
47 |
48 | 49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 | 59 | {{ partial "footer_js" . }} -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/index.html: -------------------------------------------------------------------------------- 1 | {{ partial "head" . }} 2 | 3 | {{ if (eq (trim .Site.Params.provider " " | lower) "github") | and (isset .Site.Params "repo_url") }} 4 | {{ $repo_id := replace .Site.Params.repo_url "https://github.com/" ""}} 5 | {{ .Scratch.Set "repo_id" $repo_id }} 6 | {{ end }} 7 | 8 |
9 |
10 |
11 | 12 | 13 | 14 | 15 | 16 |
17 | {{ partial "header" . }} 18 |
19 | 20 |
21 |
22 | {{ partial "drawer" . }} 23 |
24 | 25 |
26 |
27 | {{ range where .Site.Pages "Type" "index" }} 28 |

{{ .Title }} {{ if .IsDraft }} (Draft){{ end }}

29 | 30 | {{ .Content }} 31 | {{ end }} 32 | 33 | 44 | 45 |
46 | {{ partial "footer" . }} 47 |
48 |
49 |
50 | 51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 | 61 | {{ partial "footer_js" . }} -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/partials/drawer.html: -------------------------------------------------------------------------------- 1 | 71 | -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/partials/footer.html: -------------------------------------------------------------------------------- 1 | {{ if .IsPage }} 2 | {{ if .Prev | or .Next }} 3 | 44 | {{ end }} 45 | {{ end }} 46 | 47 | {{ if .IsHome }} 48 | {{ if gt (len .Site.Pages) 2 }} 49 | 73 | {{ end }} 74 | {{ end }} -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/partials/header.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/partials/nav.html: -------------------------------------------------------------------------------- 1 | {{ $currentNode := . }} 2 | 3 | {{ range .Site.Menus.main.ByWeight }} 4 | 5 | {{ $.Scratch.Set "currentMenuEntry" . }} 6 |
  • 7 | {{ if .HasChildren }} 8 |
  • 9 | {{ $currentURL := .URL }} 10 | 11 | {{ range $.Site.Pages }} 12 | {{ if eq (printf "%s%s" $.Site.BaseURL $currentURL) ( printf "%s" .Permalink) }} 13 | {{ .Section | title }} 14 | {{ end }} 15 | {{ end }} 16 | 17 | 18 |
      19 | {{ range .Children }} 20 | {{ $.Scratch.Set "currentMenuEntry" . }} 21 | {{ partial "nav_link" $currentNode }} 22 | {{ end }} 23 |
    24 |
  • 25 | {{ else }} 26 | {{ partial "nav_link" $currentNode }} 27 | {{ end }} 28 | 29 | {{ end }} -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/partials/nav_link.html: -------------------------------------------------------------------------------- 1 | {{ $currentMenuEntry := .Scratch.Get "currentMenuEntry" }} 2 | {{ $isCurrent := eq .Permalink ($currentMenuEntry.URL | absURL | printf "%s") }} 3 | 4 | 5 | 6 | {{ $currentMenuEntry.Pre }} 7 | {{ $currentMenuEntry.Name }} 8 | 9 | 10 | {{ if $isCurrent }} 11 |
      12 |
    13 | {{ end }} -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/shortcodes/note.html: -------------------------------------------------------------------------------- 1 |
    2 |

    {{ .Get "title" }}

    3 |

    {{ printf "%s" .Inner | markdownify }}

    4 |
    -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/layouts/shortcodes/warning.html: -------------------------------------------------------------------------------- 1 |
    2 |

    {{ .Get "title" }}

    3 |

    {{ printf "%s" .Inner | markdownify }}

    4 |
    -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/fonts/icon.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/static/fonts/icon.eot -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/fonts/icon.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/static/fonts/icon.ttf -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/fonts/icon.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/static/fonts/icon.woff -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/images/colors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/static/images/colors.png -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/images/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/static/images/favicon.ico -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/images/logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/static/images/logo.png -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/images/screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/doc/themes/hugo-material-docs/static/images/screen.png -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/static/stylesheets/highlight/highlight.css: -------------------------------------------------------------------------------- 1 | /* 2 | * overwrite the current primary color of the 3 | * theme that is used as fallback in codeblocks 4 | */ 5 | .article pre code { 6 | color: rgba(0, 0, 0, 0.8) !important; 7 | } 8 | 9 | 10 | /* 11 | HIGHLIGHT.JS THEME 12 | 13 | tweaked version of the Github theme 14 | */ 15 | 16 | .hljs { 17 | display:block; 18 | overflow-x:auto; 19 | } 20 | 21 | .hljs-comment, 22 | .hljs-quote { 23 | color:#998; 24 | font-style:italic; 25 | } 26 | 27 | .hljs-keyword, 28 | .hljs-selector-tag, 29 | .hljs-subst { 30 | color:#333; 31 | font-weight:700; 32 | } 33 | 34 | .hljs-number, 35 | .hljs-literal, 36 | .hljs-variable, 37 | .hljs-template-variable, 38 | .hljs-tag .hljs-attr { 39 | color:teal; 40 | } 41 | 42 | .hljs-string, 43 | .hljs-doctag { 44 | color:#d14; 45 | } 46 | 47 | .hljs-title, 48 | .hljs-section, 49 | .hljs-selector-id { 50 | color:#900; 51 | font-weight:700; 52 | } 53 | 54 | .hljs-subst { 55 | font-weight:400; 56 | } 57 | 58 | .hljs-type, 59 | .hljs-class .hljs-title { 60 | color:#458; 61 | font-weight:700; 62 | } 63 | 64 | .hljs-tag, 65 | .hljs-name, 66 | .hljs-attribute { 67 | color:navy; 68 | font-weight:400; 69 | } 70 | 71 | .hljs-regexp, 
72 | .hljs-link { 73 | color:#009926; 74 | } 75 | 76 | .hljs-symbol, 77 | .hljs-bullet { 78 | color:#990073; 79 | } 80 | 81 | .hljs-built_in, 82 | .hljs-builtin-name { 83 | color:#0086b3; 84 | } 85 | 86 | .hljs-meta { 87 | color:#999; 88 | font-weight:700; 89 | } 90 | 91 | .hljs-deletion { 92 | background:#fdd; 93 | } 94 | 95 | .hljs-addition { 96 | background:#dfd; 97 | } 98 | 99 | .hljs-emphasis { 100 | font-style:italic; 101 | } 102 | 103 | .hljs-strong { 104 | font-weight:700; 105 | } 106 | -------------------------------------------------------------------------------- /doc/themes/hugo-material-docs/theme.toml: -------------------------------------------------------------------------------- 1 | name = "Material Docs" 2 | license = "MIT" 3 | licenselink = "https://github.com/digitalcraftsman/hugo-material-docs/blob/master/LICENSE.md" 4 | description = "A material design theme for documentations." 5 | homepage = "https://github.com/digitalcraftsman/hugo-material-docs" 6 | tags = ["material", "documentation", "docs", "google analytics", "responsive"] 7 | features = ["", ""] 8 | min_version = 0.15 9 | 10 | [author] 11 | name = "Digitalcraftsman" 12 | homepage = "https://github.com/digitalcraftsman" 13 | 14 | # If porting an existing theme 15 | [original] 16 | name = "Martin Donath" 17 | homepage = "http://struct.cc/" 18 | repo = "https://github.com/squidfunk/mkdocs-material" 19 | -------------------------------------------------------------------------------- /etcd/client.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package etcd 24 | 25 | import ( 26 | "fmt" 27 | "strconv" 28 | "time" 29 | 30 | "golang.org/x/net/context" 31 | 32 | etcd "github.com/coreos/etcd/client" 33 | 34 | "github.com/skydive-project/skydive/config" 35 | ) 36 | 37 | type EtcdClient struct { 38 | Client *etcd.Client 39 | KeysApi etcd.KeysAPI 40 | } 41 | 42 | func (client *EtcdClient) GetInt64(key string) (int64, error) { 43 | resp, err := client.KeysApi.Get(context.Background(), key, nil) 44 | if err != nil { 45 | return 0, err 46 | } 47 | return strconv.ParseInt(resp.Node.Value, 10, 64) 48 | } 49 | 50 | func (client *EtcdClient) SetInt64(key string, value int64) error { 51 | _, err := client.KeysApi.Set(context.Background(), key, strconv.FormatInt(value, 10), nil) 52 | return err 53 | } 54 | 55 | func (client *EtcdClient) Stop() { 56 | if tr, ok := etcd.DefaultTransport.(interface { 57 | CloseIdleConnections() 58 | }); ok { 59 | tr.CloseIdleConnections() 60 | } 61 | } 62 | 63 | func NewEtcdClient(etcdServers []string, clientTimeout time.Duration) (*EtcdClient, error) { 64 | cfg := etcd.Config{ 65 | Endpoints: etcdServers, 66 | Transport: etcd.DefaultTransport, 67 | HeaderTimeoutPerRequest: clientTimeout, 68 | } 69 | 70 | etcdClient, err := etcd.New(cfg) 71 | if err != nil { 72 | return nil, fmt.Errorf("Failed to connect to etcd: %s", err) 73 | } 74 | 75 | kapi := 
etcd.NewKeysAPI(etcdClient) 76 | 77 | return &EtcdClient{ 78 | Client: &etcdClient, 79 | KeysApi: kapi, 80 | }, nil 81 | } 82 | 83 | func NewEtcdClientFromConfig() (*EtcdClient, error) { 84 | etcdServers := config.GetEtcdServerAddrs() 85 | etcdTimeout := config.GetConfig().GetInt("etcd.client_timeout") 86 | switch etcdTimeout { 87 | case 0: 88 | etcdTimeout = 5 // Default timeout 89 | case -1: 90 | etcdTimeout = 0 // No timeout 91 | } 92 | 93 | return NewEtcdClient(etcdServers, time.Duration(etcdTimeout)*time.Second) 94 | } 95 | -------------------------------------------------------------------------------- /filters/filters.proto: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | syntax = "proto3"; 24 | 25 | package filters; 26 | 27 | enum BoolFilterOp { 28 | OR = 0; 29 | AND = 1; 30 | NOT = 2; 31 | } 32 | 33 | message TermStringFilter { 34 | string Key = 1; 35 | string value = 2; 36 | } 37 | 38 | message TermInt64Filter { 39 | string Key = 1; 40 | int64 value = 2; 41 | } 42 | 43 | message NeStringFilter { 44 | string Key = 1; 45 | string Value = 2; 46 | } 47 | 48 | message NeInt64Filter { 49 | string Key = 1; 50 | int64 Value = 2; 51 | } 52 | 53 | message GtInt64Filter { 54 | string Key = 1; 55 | int64 Value = 2; 56 | } 57 | 58 | message LtInt64Filter { 59 | string Key = 1; 60 | int64 Value = 2; 61 | } 62 | 63 | message GteInt64Filter { 64 | string Key = 1; 65 | int64 Value = 2; 66 | } 67 | 68 | message LteInt64Filter { 69 | string Key = 1; 70 | int64 Value = 2; 71 | } 72 | 73 | message RegexFilter { 74 | string Key = 1; 75 | string Value = 2; 76 | } 77 | 78 | message Filter { 79 | TermStringFilter TermStringFilter = 1; 80 | TermInt64Filter TermInt64Filter = 2; 81 | 82 | GtInt64Filter GtInt64Filter = 3; 83 | LtInt64Filter LtInt64Filter = 4; 84 | GteInt64Filter GteInt64Filter = 5; 85 | LteInt64Filter LteInt64Filter = 6; 86 | 87 | BoolFilter BoolFilter = 7; 88 | RegexFilter RegexFilter = 8; 89 | } 90 | 91 | message BoolFilter { 92 | BoolFilterOp Op = 1; 93 | repeated Filter Filters = 2; 94 | } 95 | 96 | message Range { 97 | int64 From = 1; 98 | int64 To = 2; 99 | } 100 | 101 | message SearchQuery { 102 | Filter Filter = 1; 103 | Range PaginationRange = 2; 104 | bool Sort = 3; 105 | bool Dedup = 4; 106 | string DedupBy = 5; 107 | string SortBy = 6; 108 | } 109 | -------------------------------------------------------------------------------- /flow/allocator.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. 
See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package flow 24 | 25 | import ( 26 | "net/http" 27 | "sync" 28 | "time" 29 | ) 30 | 31 | type TableAllocator struct { 32 | sync.RWMutex 33 | update time.Duration 34 | expire time.Duration 35 | tables map[*Table]bool 36 | } 37 | 38 | func (a *TableAllocator) Flush() { 39 | a.RLock() 40 | defer a.RUnlock() 41 | 42 | for table := range a.tables { 43 | table.Flush() 44 | } 45 | } 46 | 47 | func (a *TableAllocator) aggregateReplies(query *TableQuery, replies []*TableReply) *TableReply { 48 | reply := &TableReply{ 49 | status: http.StatusOK, 50 | Obj: make([][]byte, 0), 51 | } 52 | 53 | for _, r := range replies { 54 | if r.status >= http.StatusBadRequest { 55 | // FIX, 207 => http.StatusMultiStatus when moving to >= 1.7 56 | reply.status = 207 57 | continue 58 | } 59 | reply.Obj = append(reply.Obj, r.Obj...) 
60 | } 61 | 62 | return reply 63 | } 64 | 65 | func (a *TableAllocator) QueryTable(query *TableQuery) *TableReply { 66 | a.RLock() 67 | defer a.RUnlock() 68 | 69 | var replies []*TableReply 70 | for table := range a.tables { 71 | reply := table.Query(query) 72 | if reply != nil { 73 | replies = append(replies, reply) 74 | } 75 | } 76 | 77 | return a.aggregateReplies(query, replies) 78 | } 79 | 80 | func (a *TableAllocator) Alloc(flowCallBack ExpireUpdateFunc) *Table { 81 | a.Lock() 82 | defer a.Unlock() 83 | 84 | updateHandler := NewFlowHandler(flowCallBack, a.update) 85 | expireHandler := NewFlowHandler(flowCallBack, a.expire) 86 | t := NewTable(updateHandler, expireHandler) 87 | a.tables[t] = true 88 | 89 | return t 90 | } 91 | 92 | func (a *TableAllocator) Release(t *Table) { 93 | a.Lock() 94 | delete(a.tables, t) 95 | a.Unlock() 96 | } 97 | 98 | func NewTableAllocator(update, expire time.Duration) *TableAllocator { 99 | return &TableAllocator{ 100 | update: update, 101 | expire: expire, 102 | tables: make(map[*Table]bool), 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /flow/filters.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package flow 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/filters" 27 | "github.com/skydive-project/skydive/topology/graph" 28 | ) 29 | 30 | func NewFilterForNodeTIDs(uuids []string) *filters.Filter { 31 | return filters.NewFilterForIds(uuids, "NodeTID", "ANodeTID", "BNodeTID") 32 | } 33 | 34 | func NewFilterForNodes(nodes []*graph.Node) *filters.Filter { 35 | var ids []string 36 | for _, node := range nodes { 37 | if t, ok := node.Metadata()["TID"]; ok { 38 | ids = append(ids, t.(string)) 39 | } 40 | } 41 | return NewFilterForNodeTIDs(ids) 42 | } 43 | 44 | func NewFilterForFlowSet(flowset *FlowSet) *filters.Filter { 45 | ids := make([]string, len(flowset.Flows)) 46 | for i, flow := range flowset.Flows { 47 | ids[i] = string(flow.UUID) 48 | } 49 | return filters.NewFilterForIds(ids, "UUID") 50 | } 51 | -------------------------------------------------------------------------------- /flow/flow.proto: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | syntax = "proto3"; 24 | /* Notes : 25 | o proto3 fields are optional by default 26 | o required fields are not allowed in proto3 */ 27 | 28 | package flow; 29 | 30 | enum FlowProtocol { 31 | ETHERNET = 0; 32 | IPV4 = 1; 33 | TCPPORT = 2; 34 | UDPPORT = 3; 35 | SCTPPORT = 4; 36 | IPV6 = 5; 37 | } 38 | 39 | message FlowLayer { 40 | FlowProtocol Protocol = 1; 41 | string A = 3; 42 | string B = 4; 43 | } 44 | 45 | message FlowMetric { 46 | int64 Start = 1; 47 | int64 Last = 2; 48 | int64 ABPackets = 3; 49 | int64 ABBytes = 4; 50 | int64 BAPackets = 5; 51 | int64 BABytes = 6; 52 | } 53 | 54 | message Flow { 55 | /* Flow Universally Unique IDentifier 56 | flow.UUID is unique in the universe, as it should be used as a key of an 57 | hashtable. By design 2 different flows, their UUID are always different. 58 | flow.UUID can be used as Database Index. 59 | */ 60 | string UUID = 1; 61 | string LayersPath = 2; 62 | 63 | /* Application is the last layer which is not a payload. 
64 | */ 65 | string Application = 3; 66 | 67 | /* Data Flow info */ 68 | FlowLayer Link = 20; 69 | FlowLayer Network = 21; 70 | FlowLayer Transport = 22; 71 | 72 | /* Data Flow Metric info from the 1st layer 73 | amount of data between two updates 74 | */ 75 | FlowMetric LastUpdateMetric = 31; 76 | /* Total amount of data for the whole flow duration */ 77 | FlowMetric Metric = 32; 78 | 79 | /* Flow Tracking IDentifier, from 1st packet bytes 80 | flow.TrackingID could be used to identify an unique flow whatever it has 81 | been captured on the infrastructure. flow.TrackingID is calculated from 82 | the bytes of the first packet of his session. 83 | flow.TrackingID can be used as a Tag. 84 | */ 85 | string TrackingID = 50; 86 | string L3TrackingID = 51; 87 | 88 | /* Flow Parent UUID is used as reference to the parent flow 89 | Flow.ParentUUID is the same value that point to his parent flow.UUID 90 | */ 91 | string ParentUUID = 6; 92 | 93 | /* Topology info */ 94 | string NodeTID = 33; 95 | string ANodeTID = 34; 96 | string BNodeTID = 35; 97 | } 98 | -------------------------------------------------------------------------------- /flow/mappings/graph.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package mappings 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/flow" 27 | "github.com/skydive-project/skydive/flow/packet" 28 | "github.com/skydive-project/skydive/logging" 29 | "github.com/skydive-project/skydive/topology/graph" 30 | ) 31 | 32 | type GraphFlowEnhancer struct { 33 | Graph *graph.Graph 34 | } 35 | 36 | func (gfe *GraphFlowEnhancer) getNodeTID(mac string) string { 37 | if packet.IsBroadcastMac(mac) || packet.IsMulticastMac(mac) { 38 | return "*" 39 | } 40 | 41 | gfe.Graph.RLock() 42 | defer gfe.Graph.RUnlock() 43 | 44 | intfs := gfe.Graph.GetNodes(graph.Metadata{"MAC": mac}) 45 | if len(intfs) > 1 { 46 | logging.GetLogger().Infof("GraphFlowEnhancer found more than one interface for the mac: %s", mac) 47 | } else if len(intfs) == 1 { 48 | if t, ok := intfs[0].Metadata()["TID"]; ok { 49 | return t.(string) 50 | } 51 | } 52 | return "" 53 | } 54 | 55 | func (gfe *GraphFlowEnhancer) Enhance(f *flow.Flow) { 56 | if f.ANodeTID == "" || f.BNodeTID == "" { 57 | if f.Link == nil { 58 | return 59 | } 60 | } 61 | if f.ANodeTID == "" { 62 | f.ANodeTID = gfe.getNodeTID(f.Link.A) 63 | } 64 | if f.BNodeTID == "" { 65 | f.BNodeTID = gfe.getNodeTID(f.Link.B) 66 | } 67 | } 68 | 69 | func NewGraphFlowEnhancer(g *graph.Graph) *GraphFlowEnhancer { 70 | return &GraphFlowEnhancer{ 71 | Graph: g, 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /flow/mappings/neutron.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package mappings 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/flow" 27 | "github.com/skydive-project/skydive/flow/packet" 28 | "github.com/skydive-project/skydive/logging" 29 | "github.com/skydive-project/skydive/topology/graph" 30 | ) 31 | 32 | type NeutronFlowEnhancer struct { 33 | Graph *graph.Graph 34 | } 35 | 36 | func (nfe *NeutronFlowEnhancer) getNodeTID(mac string) string { 37 | if packet.IsBroadcastMac(mac) || packet.IsMulticastMac(mac) { 38 | return "*" 39 | } 40 | 41 | nfe.Graph.RLock() 42 | defer nfe.Graph.RUnlock() 43 | 44 | // use the PeerIntfMAC metadata provided by the neutron probe. The interface used is the 45 | // one attached to the VM interface. 
46 | intfs := nfe.Graph.GetNodes(graph.Metadata{"PeerIntfMAC": mac, "Manager": "neutron"}) 47 | if len(intfs) > 1 { 48 | logging.GetLogger().Infof("NeutronFlowEnhancer found more than one interface with the PeerIntfMAC: %s", mac) 49 | } else if len(intfs) == 1 { 50 | if t, ok := intfs[0].Metadata()["TID"]; ok { 51 | return t.(string) 52 | } 53 | } 54 | return "" 55 | } 56 | 57 | func (nfe *NeutronFlowEnhancer) Enhance(f *flow.Flow) { 58 | if f.ANodeTID == "" || f.BNodeTID == "" { 59 | if f.Link == nil { 60 | return 61 | } 62 | } 63 | if f.ANodeTID == "" { 64 | f.ANodeTID = nfe.getNodeTID(f.Link.A) 65 | } 66 | if f.BNodeTID == "" { 67 | f.BNodeTID = nfe.getNodeTID(f.Link.B) 68 | } 69 | } 70 | 71 | func NewNeutronFlowEnhancer(g *graph.Graph) *NeutronFlowEnhancer { 72 | return &NeutronFlowEnhancer{ 73 | Graph: g, 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /flow/mappings/pipeline.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package mappings 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/flow" 27 | ) 28 | 29 | type FlowEnhancer interface { 30 | Enhance(flow *flow.Flow) 31 | } 32 | 33 | type FlowMappingPipeline struct { 34 | Enhancers []FlowEnhancer 35 | } 36 | 37 | func (fe *FlowMappingPipeline) EnhanceFlow(flow *flow.Flow) { 38 | for _, enhancer := range fe.Enhancers { 39 | enhancer.Enhance(flow) 40 | } 41 | } 42 | 43 | func (fe *FlowMappingPipeline) Enhance(flows []*flow.Flow) { 44 | for _, flow := range flows { 45 | fe.EnhanceFlow(flow) 46 | } 47 | } 48 | 49 | func (fe *FlowMappingPipeline) AddEnhancer(e FlowEnhancer) { 50 | fe.Enhancers = append(fe.Enhancers, e) 51 | } 52 | 53 | func NewFlowMappingPipeline(enhancers ...FlowEnhancer) *FlowMappingPipeline { 54 | return &FlowMappingPipeline{ 55 | Enhancers: enhancers, 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /flow/ondemand/ondemand.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
package ondemand

import "github.com/skydive-project/skydive/api"

const (
	// Namespace is the WebSocket message namespace used by the on-demand
	// capture protocol.
	Namespace = "OnDemand"
)

// CaptureQuery is the message payload asking an agent to start or stop a
// capture on the topology node identified by NodeID.
type CaptureQuery struct {
	NodeID  string
	Capture api.Capture
}
// IsMulticastMac returns true if the given textual MAC address has the
// multicast bit (least significant bit of the first octet) set. Note that
// the broadcast address is also a multicast address.
func IsMulticastMac(mac string) bool {
	hw, err := net.ParseMAC(mac)
	if err != nil {
		// Not a parsable MAC address.
		return false
	}
	return (hw[0] & 0x01) == 0x01
}

// IsBroadcastMac returns true if the given textual MAC address is the link
// layer broadcast address (all bits set). The address is parsed instead of
// compared as a literal string so that every valid textual form is accepted
// (upper case, dash separated, ...), consistently with IsMulticastMac.
func IsBroadcastMac(mac string) bool {
	hw, err := net.ParseMAC(mac)
	if err != nil {
		return false
	}
	for _, b := range hw {
		if b != 0xff {
			return false
		}
	}
	return len(hw) > 0
}

// IsMulticastIP returns true if the given textual IP address (IPv4 or IPv6)
// is a multicast address.
func IsMulticastIP(ip string) bool {
	netip := net.ParseIP(ip)
	if netip == nil {
		return false
	}
	return netip.IsMulticast()
}
See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package probes 24 | 25 | import ( 26 | "github.com/google/gopacket" 27 | "github.com/skydive-project/skydive/flow/probes/afpacket" 28 | ) 29 | 30 | type AFPacketHandle struct { 31 | tpacket *afpacket.TPacket 32 | } 33 | 34 | func (h *AFPacketHandle) ReadPacketData() ([]byte, gopacket.CaptureInfo, error) { 35 | return h.tpacket.ReadPacketData() 36 | } 37 | 38 | func (h *AFPacketHandle) Close() { 39 | h.tpacket.Close() 40 | } 41 | 42 | func NewAFPacketHandle(ifName string, snaplen int32) (*AFPacketHandle, error) { 43 | tpacket, err := afpacket.NewTPacket( 44 | afpacket.OptInterface(ifName), 45 | afpacket.OptFrameSize(snaplen), 46 | afpacket.OptPollTimeout(1000), 47 | ) 48 | 49 | if err != nil { 50 | return nil, err 51 | } 52 | 53 | return &AFPacketHandle{tpacket: tpacket}, err 54 | } 55 | -------------------------------------------------------------------------------- /flow/request.proto: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. 
syntax = "proto3";

package flow;

import "flow/set.proto";

/* FlowSearchReply is the answer to a flow search request, carrying the
   set of flows matching the query. */
message FlowSearchReply {
  FlowSet FlowSet = 1;
}
See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package flow 24 | 25 | import ( 26 | "encoding/json" 27 | 28 | shttp "github.com/skydive-project/skydive/http" 29 | "github.com/skydive-project/skydive/logging" 30 | ) 31 | 32 | const ( 33 | Namespace = "Flow" 34 | ) 35 | 36 | type TableServer struct { 37 | shttp.DefaultWSClientEventHandler 38 | WSAsyncClientPool *shttp.WSAsyncClientPool 39 | TableAllocator *TableAllocator 40 | } 41 | 42 | func (s *TableServer) OnTableQuery(c *shttp.WSAsyncClient, msg shttp.WSMessage) { 43 | var query TableQuery 44 | if err := json.Unmarshal([]byte(*msg.Obj), &query); err != nil { 45 | logging.GetLogger().Errorf("Unable to decode search flow message %v", msg) 46 | return 47 | } 48 | 49 | result := s.TableAllocator.QueryTable(&query) 50 | reply := msg.Reply(result, "TableResult", result.status) 51 | c.SendWSMessage(reply) 52 | } 53 | 54 | func (s *TableServer) OnMessage(c *shttp.WSAsyncClient, msg shttp.WSMessage) { 55 | if msg.Namespace != Namespace { 56 | return 57 | } 58 | 59 | switch msg.Type { 60 | case "TableQuery": 61 | s.OnTableQuery(c, msg) 62 | } 63 | } 64 | 65 | func NewServer(allocator *TableAllocator, wspool *shttp.WSAsyncClientPool) *TableServer { 66 | s := &TableServer{ 67 | TableAllocator: allocator, 68 | WSAsyncClientPool: wspool, 69 | } 70 | wspool.AddEventHandler(s) 71 | 72 | return s 73 | } 74 | -------------------------------------------------------------------------------- /flow/set.proto: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
syntax = "proto3";

package flow;

import "flow/flow.proto";

/* FlowSet carries a list of flows together with the time span
   (Start/End timestamps) they belong to. */
message FlowSet {
  repeated Flow Flows = 1;
  int64 Start = 2;
  int64 End = 3;
}
20 | * 21 | */ 22 | 23 | package storage 24 | 25 | import ( 26 | "errors" 27 | "fmt" 28 | 29 | "github.com/skydive-project/skydive/config" 30 | "github.com/skydive-project/skydive/filters" 31 | "github.com/skydive-project/skydive/flow" 32 | "github.com/skydive-project/skydive/flow/storage/elasticsearch" 33 | "github.com/skydive-project/skydive/flow/storage/orientdb" 34 | "github.com/skydive-project/skydive/logging" 35 | ) 36 | 37 | var ( 38 | NoStorageConfigured error = errors.New("No storage backend has been configured") 39 | ) 40 | 41 | type Storage interface { 42 | Start() 43 | StoreFlows(flows []*flow.Flow) error 44 | SearchFlows(fsq filters.SearchQuery) (*flow.FlowSet, error) 45 | SearchMetrics(fsq filters.SearchQuery, metricFilter *filters.Filter) (map[string][]*flow.FlowMetric, error) 46 | Stop() 47 | } 48 | 49 | func NewStorage(backend string) (s Storage, err error) { 50 | switch backend { 51 | case "elasticsearch": 52 | s, err = elasticsearch.New() 53 | if err != nil { 54 | logging.GetLogger().Fatalf("Can't connect to ElasticSearch server: %v", err) 55 | } 56 | case "orientdb": 57 | s, err = orientdb.New() 58 | if err != nil { 59 | logging.GetLogger().Fatalf("Can't connect to OrientDB server: %v", err) 60 | } 61 | case "": 62 | logging.GetLogger().Infof("Using no storage") 63 | return 64 | default: 65 | err = fmt.Errorf("Storage type unknown: %s", backend) 66 | logging.GetLogger().Fatalf(err.Error()) 67 | return 68 | } 69 | 70 | logging.GetLogger().Infof("Using %s as storage", backend) 71 | return 72 | } 73 | 74 | func NewStorageFromConfig() (s Storage, err error) { 75 | return NewStorage(config.GetConfig().GetString("analyzer.storage")) 76 | } 77 | -------------------------------------------------------------------------------- /flow/traversal/traversal_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 
package traversal

import (
	"reflect"
	"testing"

	"github.com/skydive-project/skydive/flow"
)

// TestFlowMetricsAggregates checks that MetricsTraversalStep.Aggregates
// merges the per-flow metric series into a single "Aggregated" series:
// metrics whose [Start, Last] windows overlap across flows are expected to
// be summed into one entry, while non-overlapping metrics stay separate.
func TestFlowMetricsAggregates(t *testing.T) {
	step := MetricsTraversalStep{
		metrics: map[string][]*flow.FlowMetric{
			"aa": {
				{
					ABBytes:   1,
					ABPackets: 1,
					BABytes:   1,
					BAPackets: 1,
					Start:     10,
					Last:      20,
				},
				{
					ABBytes:   2,
					ABPackets: 2,
					BABytes:   2,
					BAPackets: 2,
					Start:     20,
					Last:      30,
				},
			},
			"bb": {
				{
					ABBytes:   4,
					ABPackets: 4,
					BABytes:   4,
					BAPackets: 4,
					Start:     15,
					Last:      25,
				},
				{
					ABBytes:   8,
					ABPackets: 8,
					BABytes:   8,
					BAPackets: 8,
					Start:     40,
					Last:      50,
				},
			},
			"cc": {
				{
					ABBytes:   16,
					ABPackets: 16,
					BABytes:   16,
					BAPackets: 16,
					Start:     48,
					Last:      58,
				},
			},
		},
	}

	expected := MetricsTraversalStep{
		metrics: map[string][]*flow.FlowMetric{
			"Aggregated": {
				// "aa" [10,20] overlaps "bb" [15,25]: 1 + 4 = 5.
				{
					ABBytes:   5,
					ABPackets: 5,
					BABytes:   5,
					BAPackets: 5,
					Start:     10,
					Last:      20,
				},
				// "aa" [20,30] overlaps nothing: kept as is.
				{
					ABBytes:   2,
					ABPackets: 2,
					BABytes:   2,
					BAPackets: 2,
					Start:     20,
					Last:      30,
				},
				// "bb" [40,50] overlaps "cc" [48,58]: 8 + 16 = 24.
				{
					ABBytes:   24,
					ABPackets: 24,
					BABytes:   24,
					BAPackets: 24,
					Start:     40,
					Last:      50,
				},
			},
		},
	}

	got := step.Aggregates()

	if !reflect.DeepEqual(expected.Values(), got.Values()) {
		e, _ := expected.MarshalJSON()
		g, _ := got.MarshalJSON()
		t.Errorf("Metrics mismatch, expected: \n\n%s\n\ngot: \n\n%s", string(e), string(g))
	}
}
20 | * 21 | */ 22 | 23 | package http 24 | 25 | import ( 26 | "encoding/base64" 27 | "net/http" 28 | "os" 29 | 30 | "github.com/abbot/go-http-auth" 31 | "github.com/gorilla/context" 32 | "github.com/skydive-project/skydive/config" 33 | ) 34 | 35 | const ( 36 | basicAuthRealm string = "Skydive Authentication" 37 | ) 38 | 39 | type BasicAuthenticationBackend struct { 40 | *auth.BasicAuth 41 | } 42 | 43 | func (b *BasicAuthenticationBackend) Authenticate(username string, password string) (string, error) { 44 | request := &http.Request{Header: make(map[string][]string)} 45 | creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) 46 | request.Header.Set("Authorization", "Basic "+creds) 47 | 48 | if username := b.CheckAuth(request); username == "" { 49 | return "", WrongCredentials 50 | } 51 | 52 | return creds, nil 53 | } 54 | 55 | func (b *BasicAuthenticationBackend) Wrap(wrapped auth.AuthenticatedHandlerFunc) http.HandlerFunc { 56 | return func(w http.ResponseWriter, r *http.Request) { 57 | cookie, err := r.Cookie("authtok") 58 | if err == nil { 59 | r.Header.Set("Authorization", "Basic "+cookie.Value) 60 | } 61 | 62 | if username := b.CheckAuth(r); username == "" { 63 | unauthorized(w, r) 64 | } else { 65 | ar := &auth.AuthenticatedRequest{Request: *r, Username: username} 66 | copyRequestVars(r, &ar.Request) 67 | wrapped(w, ar) 68 | context.Clear(&ar.Request) 69 | } 70 | } 71 | } 72 | 73 | func NewBasicAuthenticationBackend(file string) (*BasicAuthenticationBackend, error) { 74 | if _, err := os.Stat(file); err != nil { 75 | return nil, err 76 | } 77 | 78 | // TODO(safchain) add more providers 79 | h := auth.HtpasswdFileProvider(file) 80 | return &BasicAuthenticationBackend{ 81 | auth.NewBasicAuthenticator(basicAuthRealm, h), 82 | }, nil 83 | } 84 | 85 | func NewBasicAuthenticationBackendFromConfig() (*BasicAuthenticationBackend, error) { 86 | f := config.GetConfig().GetString("auth.basic.file") 87 | return NewBasicAuthenticationBackend(f) 88 | 
} 89 | -------------------------------------------------------------------------------- /http/noauth.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package http 24 | 25 | import ( 26 | "net/http" 27 | 28 | "github.com/abbot/go-http-auth" 29 | "github.com/gorilla/context" 30 | ) 31 | 32 | type NoAuthenticationBackend struct { 33 | } 34 | 35 | func (h *NoAuthenticationBackend) Authenticate(username string, password string) (string, error) { 36 | return "", nil 37 | } 38 | 39 | func (h *NoAuthenticationBackend) Wrap(wrapped auth.AuthenticatedHandlerFunc) http.HandlerFunc { 40 | return func(w http.ResponseWriter, r *http.Request) { 41 | ar := &auth.AuthenticatedRequest{Request: *r, Username: ""} 42 | copyRequestVars(r, &ar.Request) 43 | wrapped(w, ar) 44 | context.Clear(&ar.Request) 45 | } 46 | } 47 | 48 | func NewNoAuthenticationBackend() *NoAuthenticationBackend { 49 | return &NoAuthenticationBackend{} 50 | } 51 | -------------------------------------------------------------------------------- /probe/bundle.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
// Probe is anything that can be started and stopped.
type Probe interface {
	Start()
	Stop()
}

// ProbeBundle groups probes by name so they can be started and stopped
// together.
type ProbeBundle struct {
	Probes map[string]Probe
}

// Start starts every probe of the bundle.
func (p *ProbeBundle) Start() {
	// Name the loop variable "probe" to avoid shadowing the receiver.
	for _, probe := range p.Probes {
		probe.Start()
	}
}

// Stop stops every probe of the bundle.
func (p *ProbeBundle) Stop() {
	for _, probe := range p.Probes {
		probe.Stop()
	}
}

// GetProbe returns the probe registered under k, or nil if there is none.
func (p *ProbeBundle) GetProbe(k string) Probe {
	// A map lookup already yields the zero value (nil) for a missing key.
	return p.Probes[k]
}

// NewProbeBundle returns a bundle composed of the given named probes.
func NewProbeBundle(p map[string]Probe) *ProbeBundle {
	return &ProbeBundle{
		Probes: p,
	}
}
"${dir}/install-static-requirements.sh" 33 | 34 | cd ${GOPATH}/src/github.com/skydive-project/skydive 35 | 36 | echo "--- DOCKER IMAGE ---" 37 | make docker-image DOCKER_IMAGE=${DOCKER_IMAGE} DOCKER_TAG=${DOCKER_TAG} 38 | sudo docker login -e "${DOCKER_EMAIL}" -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" 39 | sudo docker tag ${DOCKER_IMAGE}:${DOCKER_TAG} ${DOCKER_IMAGE}:latest 40 | sudo docker push ${DOCKER_IMAGE}:${DOCKER_TAG} 41 | sudo docker push ${DOCKER_IMAGE}:latest 42 | 43 | echo "--- COPR ---" 44 | sudo dnf -y install copr-cli rpm-build 45 | mkdir -p ~/.config 46 | cat > ~/.config/copr < " % (sys.argv[0],)) 7 | sys.exit(1) 8 | 9 | changelog = open(sys.argv[1]).readlines() 10 | version = sys.argv[2] 11 | 12 | start = 0 13 | end = -1 14 | 15 | for i, line in enumerate(changelog): 16 | if line.startswith("## "): 17 | if line.startswith("## [%s]" % (version,)) and start == 0: 18 | start = i 19 | elif start != 0: 20 | end = i 21 | break 22 | 23 | if start != 0: 24 | print ''.join(changelog[start+1:end]).strip() 25 | -------------------------------------------------------------------------------- /scripts/ci/install-elasticsearch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -v 4 | 5 | sudo rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch 6 | sudo tee /etc/yum.repos.d/elasticsearch.repo < vendor.fetch.list 34 | cat vendor.fetch.list | xargs tar -xvzf /tmp/vendor.tgz --exclude "vendor/vendor.json" 35 | # remove installed 36 | find vendor/ -mindepth 2 -type f | xargs dirname | sort -u > vendor.installed.list 37 | echo "package to be removed/cleanup" 38 | diff -u vendor.fetch.list vendor.installed.list | grep '^\+v' | perl -pe 's|^\+(.*)|\1|' | tee /dev/stdout | xargs -n 1 rm -rf 39 | rm -f vendor.fetch.list vendor.installed.list 40 | popd 41 | -------------------------------------------------------------------------------- /scripts/ci/install-orientdb.sh: 
#!/bin/bash
# scripts/ci/install-orientdb.sh
# Download and unpack a local OrientDB community server under $HOME.

set -v

# Install OrientDB server
cd ${HOME}
curl -s -L "http://orientdb.com/download.php?email=unknown@unknown.com&file=orientdb-community-2.2.10.tar.gz&os=linux" > orientdb-server.tar.gz
tar xf orientdb-server.tar.gz
# Exported so later CI steps can find the server tree.
export ORIENTDBPATH=${HOME}/orientdb-community-2.2.10

# ---------------------------------------------------------------------------
#!/bin/bash
# scripts/ci/install-requirements.sh
# Install the system packages and services the CI jobs depend on, plus a
# pinned protoc release.

set -v

# Install requirements
sudo yum install -y https://www.rdoproject.org/repos/rdo-release.rpm
sudo yum -y install make openvswitch unzip docker libpcap-devel etcd libxml2-devel jq
sudo service docker start
sudo service openvswitch start
sudo service etcd start

# Log the installed openvswitch version for debugging CI failures.
rpm -qi openvswitch

# Install protoc 3.1.0 from the upstream release archive.
mkdir ${HOME}/protoc
pushd ${HOME}/protoc
wget https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip
unzip protoc-3.1.0-linux-x86_64.zip
popd
export PATH=${HOME}/protoc/bin:${PATH}

# ---------------------------------------------------------------------------
#!/bin/bash
# scripts/ci/install-static-requirements.sh
# Install static libraries and build a static libpcap 1.5.3, needed to
# produce statically linked skydive binaries.

set -v

sudo dnf -y install gcc glibc-static xz-static zlib-static flex bison byacc libxml2-static

git clone https://github.com/the-tcpdump-group/libpcap.git
cd libpcap
git checkout libpcap-1.5.3
./configure --prefix=/usr/local --disable-shared --disable-dbus --disable-bluetooth --disable-canusb
make
sudo make install
cd ..
14 | -------------------------------------------------------------------------------- /scripts/ci/run-devstack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -v 4 | 5 | git checkout -b software-factory 6 | SKYDIVE_PATH=`pwd` 7 | 8 | sudo yum -y install git iproute net-tools 9 | git clone https://git.openstack.org/openstack-dev/devstack devstack.git 10 | cd devstack.git 11 | 12 | export PATH=$PATH:/usr/sbin 13 | host_ip_iface=${host_ip_iface:-$(ip -f inet route | awk '/default/ {print $5}' | head -1)} 14 | host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | sed /temporary/d |awk /inet'/ {split($2,parts,"/"); print parts[1]}') 15 | echo "host_ip_iface=$host_ip_iface" 16 | echo "host_ips=$host_ips" 17 | 18 | cat << EOF > local.conf 19 | [[local|localrc]] 20 | 21 | DATABASE_PASSWORD=password 22 | RABBIT_PASSWORD=password 23 | SERVICE_PASSWORD=password 24 | SERVICE_TOKEN=password 25 | ADMIN_PASSWORD=password 26 | 27 | HOST_IP=$host_ips 28 | HOST_IP_IFACE=eth0 29 | 30 | # Disable glance 31 | disable_service g-api 32 | disable_service g-reg 33 | 34 | # Disable nova 35 | disable_service n-api 36 | disable_service n-crt 37 | disable_service n-cpu 38 | disable_service n-net 39 | disable_service n-cond 40 | disable_service n-sch 41 | disable_service n-cauth 42 | 43 | # Enable Neutron 44 | enable_service q-svc 45 | enable_service q-dhcp 46 | enable_service q-meta 47 | enable_service q-agt 48 | enable_service q-l3 49 | 50 | # Disable tempest 51 | disable_service tempest 52 | 53 | # Disable cinder 54 | disable_service c-sch 55 | disable_service c-api 56 | disable_service c-vol 57 | 58 | # Do not use horizon 59 | disable_service horizon 60 | 61 | ENABLE_ISOLATED_METADATA=True 62 | 63 | # Skydive 64 | enable_plugin skydive file://$SKYDIVE_PATH software-factory 65 | enable_service skydive-analyzer skydive-agent 66 | 67 | SKYDIVE_ANALYZER_LISTEN=0.0.0.0:8082 68 | SKYDIVE_AGENT_LISTEN=0.0.0.0:8081 69 | EOF 70 | 
71 | ./stack.sh 72 | 73 | set -e 74 | 75 | source openrc admin admin 76 | export PATH=$PATH:/opt/go/bin:/opt/stack/go/bin:/opt/stack/protoc/bin 77 | export GOROOT=/opt/go 78 | export GOPATH=/opt/stack/go 79 | export GO_VERSION=1.6 80 | cd /opt/stack/go/src/github.com/skydive-project/skydive/ 81 | SKYDIVE_ANALYZERS=localhost:8082 make test.functionals TAGS="neutron" VERBOSE=true TIMEOUT=2m TEST_PATTERN=Neutron 82 | -------------------------------------------------------------------------------- /scripts/ci/run-functional-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -v 4 | 5 | dir="$(dirname "$0")" 6 | 7 | . "${dir}/install-requirements.sh" 8 | . "${dir}/install-go.sh" 9 | 10 | GOFLAGS="-race" 11 | 12 | case "$BACKEND" in 13 | "orientdb") 14 | . "${dir}/install-orientdb.sh" 15 | cd ${ORIENTDBPATH} 16 | export ORIENTDB_ROOT_PASSWORD=root 17 | ${ORIENTDBPATH}/bin/server.sh & 18 | sleep 5 19 | ARGS="-graph.backend orientdb -storage.backend orientdb" 20 | GOFLAGS="$GOFLAGS" 21 | TAGS="storage" 22 | ;; 23 | "elasticsearch") 24 | . "${dir}/install-elasticsearch.sh" 25 | ARGS="-graph.backend elasticsearch -storage.backend elasticsearch" 26 | GOFLAGS="$GOFLAGS" 27 | TAGS="storage" 28 | ;; 29 | esac 30 | 31 | set -e 32 | cd ${GOPATH}/src/github.com/skydive-project/skydive 33 | make test.functionals TAGS="$TAGS" GOFLAGS="$GOFLAGS" GORACE="history_size=5" VERBOSE=true TIMEOUT=2m ARGS="$ARGS -etcd.server http://localhost:2379" 34 | -------------------------------------------------------------------------------- /scripts/ci/run-go-fmt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -v 4 | dir="$(dirname "$0")" 5 | 6 | . "${dir}/install-requirements.sh" 7 | . 
"${dir}/install-go.sh" 8 | 9 | cd ${GOPATH}/src/github.com/skydive-project/skydive 10 | 11 | make fmt 12 | make vet 13 | 14 | # check if unused package are listed in the vendor directory 15 | if [ -n "$(${GOPATH}/bin/govendor list +u)" ]; then 16 | echo "You must remove these usused packages :" 17 | ${GOPATH}/bin/govendor list +u 18 | exit 1 19 | fi 20 | -------------------------------------------------------------------------------- /scripts/ci/run-unit-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -v 4 | dir="$(dirname "$0")" 5 | 6 | . "${dir}/install-requirements.sh" 7 | . "${dir}/install-go.sh" 8 | 9 | set -e 10 | cd ${GOPATH}/src/github.com/skydive-project/skydive 11 | make test GOFLAGS=-race VERBOSE=true TIMEOUT=1m 12 | -------------------------------------------------------------------------------- /scripts/multinode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # create/delete a test topology 4 | # syntax: 5 | # ./multinode.sh start [tunnel endpoint ip] 6 | # ./multinode.sh stop 7 | # 8 | # options: 9 | # tunnel endpoint ip can be used in a mutlinodes environnement 10 | 11 | 12 | function start() { 13 | set -x 14 | 15 | sudo ovs-vsctl add-br br-int 16 | 17 | sudo ip netns add vm1 18 | sudo ip link add vm1-eth0 type veth peer name eth0 netns vm1 19 | sudo ip link set vm1-eth0 up 20 | 21 | sudo ip netns exec vm1 ip link set eth0 up 22 | sudo ip netns exec vm1 ip address add $1 dev eth0 23 | 24 | sudo ovs-vsctl add-port br-int vm1-eth0 25 | 26 | if [ ! 
-z "$2" ]; then 27 | sudo ovs-vsctl add-br br-tun 28 | sudo ovs-vsctl add-port br-int patch-int -- set interface patch-int type=patch 29 | sudo ovs-vsctl add-port br-tun patch-tun -- set interface patch-tun type=patch 30 | sudo ovs-vsctl set interface patch-int option:peer=patch-tun 31 | sudo ovs-vsctl set interface patch-tun option:peer=patch-int 32 | sudo ovs-vsctl add-port br-tun mn-gre0 -- set interface mn-gre0 type=gre options:remote_ip=$2 33 | fi 34 | } 35 | 36 | function stop() { 37 | set -x 38 | 39 | sudo ovs-vsctl del-br br-tun 40 | sudo ovs-vsctl del-br br-int 41 | sudo ip link del vm1-eth0 42 | sudo ip netns del vm1 43 | } 44 | 45 | 46 | if [ "$1" == "start" ]; then 47 | if [ -z "$2" ]; then 48 | echo "Usage: $0 start [tunnel endpoint ip]" 49 | exit 1 50 | fi 51 | 52 | start $2 $3 53 | else 54 | stop 55 | fi 56 | -------------------------------------------------------------------------------- /scripts/simple.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # create/delete a test topology 4 | # syntax: 5 | # ./simple.sh start [ns2 ip/mask] 6 | # ./simple.sh stop 7 | # 8 | # options: 9 | # ns2 ip/mask: create a second ns 10 | 11 | 12 | function start() { 13 | set -x 14 | 15 | sudo ovs-vsctl add-br br-int 16 | 17 | sudo ip netns add vm1 18 | sudo ip link add vm1-eth0 type veth peer name eth0 netns vm1 19 | sudo ip link set vm1-eth0 up 20 | 21 | sudo ip netns exec vm1 ip link set eth0 up 22 | sudo ip netns exec vm1 ip address add $1 dev eth0 23 | 24 | sudo ovs-vsctl add-port br-int vm1-eth0 25 | 26 | if [ ! 
-z "$2" ]; then 27 | sudo ip netns add vm2 28 | sudo ip link add vm2-eth0 type veth peer name eth0 netns vm2 29 | sudo ip link set vm2-eth0 up 30 | 31 | sudo ip netns exec vm2 ip link set eth0 up 32 | sudo ip netns exec vm2 ip address add $2 dev eth0 33 | 34 | sudo ovs-vsctl add-port br-int vm2-eth0 35 | fi 36 | } 37 | 38 | function stop() { 39 | set -x 40 | 41 | sudo ovs-vsctl del-br br-int 42 | sudo ip link del vm1-eth0 43 | sudo ip netns del vm1 44 | sudo ip link del vm2-eth0 45 | sudo ip netns del vm2 46 | } 47 | 48 | 49 | if [ "$1" == "start" ]; then 50 | if [ -z "$2" ]; then 51 | echo "Usage: $0 start [ns2 ip/mask]" 52 | exit 1 53 | fi 54 | 55 | start $2 $3 56 | else 57 | stop 58 | fi 59 | -------------------------------------------------------------------------------- /scripts/topology.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function _sudo() 4 | { 5 | echo -n \$ $* 6 | read 7 | sudo $* 8 | } 9 | 10 | 11 | _sudo ip netns add vm1 12 | _sudo ip netns add vm2 13 | _sudo ip l add vm1-eth0 type veth peer name eth0 netns vm1 14 | _sudo ip l set vm1-eth0 up 15 | _sudo ip l add vm2-eth0 type veth peer name eth0 netns vm2 16 | _sudo ip l set vm2-eth0 up 17 | _sudo ip netns exec vm1 ip l set eth0 up 18 | _sudo ip netns exec vm1 ip a add 192.168.0.1/24 dev eth0 19 | _sudo ip netns exec vm2 ip l set eth0 up 20 | _sudo ip netns exec vm2 ip a add 192.168.0.2/24 dev eth0 21 | _sudo ovs-vsctl add-br br1 22 | _sudo ovs-vsctl add-br br2 23 | _sudo ovs-vsctl add-port br1 vm1-eth0 24 | _sudo ovs-vsctl add-port br2 vm2-eth0 25 | _sudo ip l add vm2-eth1 type veth peer name eth1 netns vm2 26 | _sudo ip l add vm1-eth1 type veth peer name eth1 netns vm1 27 | _sudo brctl addbr lb1 28 | _sudo brctl addif lb1 vm1-eth1 29 | _sudo brctl addif lb1 vm2-eth1 30 | _sudo ovs-vsctl add-port br1 patch-br2 -- set interface patch-br2 type=patch 31 | _sudo ovs-vsctl add-port br2 patch-br1 -- set interface patch-br1 type=patch 32 | 
_sudo ovs-vsctl set interface patch-br1 option:peer=patch-br2 33 | _sudo ovs-vsctl set interface patch-br2 option:peer=patch-br1 34 | _sudo ovs-vsctl add-br br3 35 | _sudo ovs-vsctl add-port br3 int -- set interface int type=internal 36 | _sudo ip l set int netns vm1 37 | 38 | _sudo ovs-vsctl show 39 | _sudo brctl show 40 | 41 | _sudo ovs-vsctl del-br br1 42 | _sudo ovs-vsctl del-br br2 43 | _sudo ovs-vsctl del-br br3 44 | _sudo ip netns del vm1 45 | _sudo ip netns del vm2 46 | _sudo ip l del lb1 47 | _sudo ip l del vm1-eth0 48 | _sudo ip l del vm1-eth1 49 | _sudo ip l del vm2-eth0 50 | _sudo ip l del vm2-eth1 51 | -------------------------------------------------------------------------------- /scripts/tunnel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # create/delete a test topology 4 | # syntax: 5 | # ./tunnel.sh start gre|vxlan|geneve 6 | # ./tunnel.sh stop gre|vxlan|geneve 7 | 8 | function start() { 9 | set -x 10 | 11 | sudo ovs-vsctl add-br br-${1} 12 | 13 | sudo ip netns add vm1 14 | sudo ip link add vm1-eth0 type veth peer name eth0 netns vm1 15 | sudo ip link set vm1-eth0 up 16 | 17 | sudo ip netns exec vm1 ip link set eth0 up 18 | sudo ip netns exec vm1 ip address add 172.16.0.1/24 dev eth0 19 | 20 | sudo ip netns add vm2 21 | sudo ip link add vm2-eth0 type veth peer name eth0 netns vm2 22 | sudo ip link set vm2-eth0 up 23 | 24 | sudo ip netns exec vm2 ip link set eth0 up 25 | sudo ip netns exec vm2 ip address add 172.16.0.2/24 dev eth0 26 | 27 | sudo ovs-vsctl add-port br-${1} vm1-eth0 28 | sudo ovs-vsctl add-port br-${1} vm2-eth0 29 | 30 | 31 | if [ "$1" == "gre" ]; then 32 | sudo ip netns exec vm1 ip tunnel add ${1} mode gre remote 172.16.0.2 local 172.16.0.1 ttl 255 33 | elif [ "$1" == "geneve" ]; then 34 | sudo ip netns exec vm1 ip link add ${1} type geneve id 10 remote 172.16.0.2 35 | else 36 | sudo ip netns exec vm1 ip link add ${1} type vxlan id 10 group 239.0.0.10 ttl 10 dev eth0 
dstport 4789 37 | fi 38 | sudo ip netns exec vm1 ip l set ${1} up 39 | sudo ip netns exec vm1 ip link add name in type dummy 40 | sudo ip netns exec vm1 ip l set in up 41 | sudo ip netns exec vm1 ip a add 192.168.0.1/32 dev in 42 | sudo ip netns exec vm1 ip r add 192.168.0.0/24 dev ${1} 43 | 44 | if [ "$1" == "gre" ]; then 45 | sudo ip netns exec vm2 ip tunnel add ${1} mode gre remote 172.16.0.1 local 172.16.0.2 ttl 255 46 | elif [ "$1" == "geneve" ]; then 47 | sudo ip netns exec vm2 ip link add ${1} type geneve id 10 remote 172.16.0.1 48 | else 49 | sudo ip netns exec vm2 ip link add ${1} type vxlan id 10 group 239.0.0.10 ttl 10 dev eth0 dstport 4789 50 | fi 51 | sudo ip netns exec vm2 ip l set ${1} up 52 | sudo ip netns exec vm2 ip link add name in type dummy 53 | sudo ip netns exec vm2 ip l set in up 54 | sudo ip netns exec vm2 ip a add 192.168.0.2/32 dev in 55 | sudo ip netns exec vm2 ip r add 192.168.0.0/24 dev ${1} 56 | } 57 | 58 | function stop() { 59 | set -x 60 | 61 | sudo ovs-vsctl del-br br-${1} 62 | 63 | sudo ip link del vm1-eth0 64 | sudo ip netns del vm1 65 | sudo ip link del vm2-eth0 66 | sudo ip netns del vm2 67 | } 68 | 69 | if [ "$2" != "gre" ] && [ "$2" != "vxlan" ] && [ "$2" != "geneve" ] 70 | then 71 | echo -n "Second argument must be 'gre' or 'vxlan'. Exiting." 72 | exit 1 73 | fi 74 | 75 | if [ "$1" == "start" ]; then 76 | start $2 77 | else 78 | stop $2 79 | fi 80 | -------------------------------------------------------------------------------- /skydive.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package main 24 | 25 | import "github.com/skydive-project/skydive/cmd" 26 | 27 | func main() { 28 | cmd.RootCmd.Execute() 29 | } 30 | -------------------------------------------------------------------------------- /statics/css/images/ui-bg_flat_0_aaaaaa_40x100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_flat_0_aaaaaa_40x100.png -------------------------------------------------------------------------------- /statics/css/images/ui-bg_flat_75_ffffff_40x100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_flat_75_ffffff_40x100.png -------------------------------------------------------------------------------- /statics/css/images/ui-bg_glass_55_fbf9ee_1x400.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_glass_55_fbf9ee_1x400.png -------------------------------------------------------------------------------- /statics/css/images/ui-bg_glass_65_ffffff_1x400.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_glass_65_ffffff_1x400.png -------------------------------------------------------------------------------- /statics/css/images/ui-bg_glass_75_dadada_1x400.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_glass_75_dadada_1x400.png -------------------------------------------------------------------------------- /statics/css/images/ui-bg_glass_75_e6e6e6_1x400.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_glass_75_e6e6e6_1x400.png -------------------------------------------------------------------------------- /statics/css/images/ui-bg_glass_95_fef1ec_1x400.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_glass_95_fef1ec_1x400.png -------------------------------------------------------------------------------- /statics/css/images/ui-bg_highlight-soft_75_cccccc_1x100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-bg_highlight-soft_75_cccccc_1x100.png -------------------------------------------------------------------------------- /statics/css/images/ui-icons_222222_256x240.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-icons_222222_256x240.png -------------------------------------------------------------------------------- /statics/css/images/ui-icons_2e83ff_256x240.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-icons_2e83ff_256x240.png -------------------------------------------------------------------------------- /statics/css/images/ui-icons_454545_256x240.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-icons_454545_256x240.png -------------------------------------------------------------------------------- /statics/css/images/ui-icons_888888_256x240.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-icons_888888_256x240.png -------------------------------------------------------------------------------- /statics/css/images/ui-icons_cd0a0a_256x240.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/css/images/ui-icons_cd0a0a_256x240.png -------------------------------------------------------------------------------- /statics/css/jquery.jsonview.css: -------------------------------------------------------------------------------- 1 | @charset "UTF-8"; 2 | .jsonview { 3 | font-family: monospace; 4 | font-size: 1.1em; 5 | white-space: pre-wrap; } 6 | .jsonview .prop { 7 | font-weight: bold; } 8 | .jsonview .null { 9 | color: red; } 10 | .jsonview .bool { 11 | color: blue; } 12 | 
.jsonview .num { 13 | color: blue; } 14 | .jsonview .string { 15 | color: green; 16 | white-space: pre-wrap; } 17 | .jsonview .string.multiline { 18 | display: inline-block; 19 | vertical-align: text-top; } 20 | .jsonview .collapser { 21 | position: absolute; 22 | left: -1em; 23 | cursor: pointer; } 24 | .jsonview .collapsible { 25 | transition: height 1.2s; 26 | transition: width 1.2s; } 27 | .jsonview .collapsible.collapsed { 28 | height: .8em; 29 | width: 1em; 30 | display: inline-block; 31 | overflow: hidden; 32 | margin: 0; } 33 | .jsonview .collapsible.collapsed:before { 34 | content: "…"; 35 | width: 1em; 36 | margin-left: .2em; } 37 | .jsonview .collapser.collapsed { 38 | transform: rotate(0deg); } 39 | .jsonview .q { 40 | display: inline-block; 41 | width: 0px; 42 | color: transparent; } 43 | .jsonview li { 44 | position: relative; } 45 | .jsonview ul { 46 | list-style: none; 47 | margin: 0 0 0 2em; 48 | padding: 0; } 49 | .jsonview h1 { 50 | font-size: 1.2em; } 51 | 52 | /*# sourceMappingURL=jquery.jsonview.css.map */ 53 | -------------------------------------------------------------------------------- /statics/css/timeslider.css: -------------------------------------------------------------------------------- 1 | .axis { 2 | fill: gray; 3 | -webkit-user-select: none; 4 | -moz-user-select: none; 5 | user-select: none; 6 | } 7 | 8 | .axis .halo { 9 | stroke: gray; 10 | stroke-width: 2px; 11 | stroke-linecap: round; 12 | } 13 | 14 | .slider .handle path { 15 | stroke: white; 16 | stroke-width: 2px; 17 | stroke-linecap: round; 18 | pointer-events: none; 19 | } 20 | 21 | .disabled .slider .handle path { 22 | stroke: #333; 23 | } 24 | 25 | .slider .handle text { 26 | fill: white; 27 | text-align: center; 28 | font-size: 12px; 29 | } 30 | 31 | .disabled .slider .handle text { 32 | fill: #333; 33 | } 34 | 35 | .slider .tick text { 36 | fill: yellowgreen;; 37 | } 38 | 39 | .disabled .slider .tick text { 40 | fill: #333; 41 | } 42 | 43 | .topology-d3 { 44 | clear: 
both; 45 | } 46 | 47 | .timeslider { 48 | text-align: center; 49 | margin: auto; 50 | display: table; 51 | width: auto; 52 | padding-left: 15px; 53 | padding-right: 25px; 54 | padding-bottom: 5px; 55 | padding-top: 5px; 56 | background-color: #222; 57 | } 58 | 59 | .timeslider .bootstrap-switch { 60 | margin-right: 15px; 61 | width: 50px; 62 | } 63 | 64 | .timeslider .bootstrap-switch .bootstrap-switch-label { 65 | color: white; 66 | } 67 | 68 | .timeslider-div svg { 69 | overflow: visible; 70 | } 71 | -------------------------------------------------------------------------------- /statics/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /statics/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /statics/img/bridge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/bridge.png -------------------------------------------------------------------------------- /statics/img/collapse.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/collapse.gif -------------------------------------------------------------------------------- /statics/img/docker.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/docker.png -------------------------------------------------------------------------------- /statics/img/expand.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/expand.gif -------------------------------------------------------------------------------- /statics/img/host.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/host.png -------------------------------------------------------------------------------- /statics/img/intf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/intf.png -------------------------------------------------------------------------------- /statics/img/media-record.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/media-record.png -------------------------------------------------------------------------------- /statics/img/minus-outline-16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/minus-outline-16.png -------------------------------------------------------------------------------- /statics/img/ns.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/ns.png -------------------------------------------------------------------------------- /statics/img/openstack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/openstack.png -------------------------------------------------------------------------------- /statics/img/pin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/pin.png -------------------------------------------------------------------------------- /statics/img/plus-16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/plus-16.png -------------------------------------------------------------------------------- /statics/img/port.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/port.png -------------------------------------------------------------------------------- /statics/img/record.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/record.png -------------------------------------------------------------------------------- /statics/img/record_red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/record_red.png 
-------------------------------------------------------------------------------- /statics/img/refresh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/refresh.png -------------------------------------------------------------------------------- /statics/img/skydive-logo-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/skydive-logo-16x16.png -------------------------------------------------------------------------------- /statics/img/switch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/switch.png -------------------------------------------------------------------------------- /statics/img/trash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/trash.png -------------------------------------------------------------------------------- /statics/img/veth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/statics/img/veth.png -------------------------------------------------------------------------------- /statics/js/capture.js: -------------------------------------------------------------------------------- 1 | var CaptureAPI = { 2 | 3 | list: function() { 4 | return $.ajax({ 5 | dataType: "json", 6 | url: '/api/capture', 7 | contentType: "application/json; charset=utf-8", 8 | method: 'GET', 9 | }) 10 | .fail(function(e) { 11 | $.notify({ 12 | message: 'Capture list error: ' 
+ e.responseText 13 | }, { 14 | type: 'danger' 15 | }); 16 | }); 17 | }, 18 | 19 | create: function(query, name, description) { 20 | return $.ajax({ 21 | dataType: "json", 22 | url: '/api/capture', 23 | data: JSON.stringify({"GremlinQuery": query, 24 | "Name": name || null, 25 | "Description": description || null}), 26 | contentType: "application/json; charset=utf-8", 27 | method: 'POST', 28 | }) 29 | .then(function(data) { 30 | $.notify({ 31 | message: 'Capture created' 32 | },{ 33 | type: 'success' 34 | }); 35 | return data; 36 | }) 37 | .fail(function(e) { 38 | $.notify({ 39 | message: 'Capture create error: ' + e.responseText 40 | },{ 41 | type: 'danger' 42 | }); 43 | }); 44 | }, 45 | 46 | delete: function(uuid) { 47 | return $.ajax({ 48 | dataType: 'text', 49 | url: '/api/capture/' + uuid + '/', 50 | method: 'DELETE', 51 | }) 52 | .fail(function(e) { 53 | $.notify({ 54 | message: 'Capture delete error: ' + e.responseText 55 | },{ 56 | type: 'danger' 57 | }); 58 | }); 59 | } 60 | 61 | }; 62 | -------------------------------------------------------------------------------- /statics/js/components/node-selector.js: -------------------------------------------------------------------------------- 1 | Vue.component('node-selector', { 2 | 3 | props: { 4 | value: { 5 | type: String, 6 | required: true, 7 | }, 8 | placeholder: { 9 | type: String, 10 | }, 11 | attr: { 12 | type: String, 13 | default: "Metadata.TID" 14 | } 15 | }, 16 | 17 | template: '\ 18 |
    \ 19 | \ 24 | \ 25 |
    \ 26 | ', 27 | 28 | methods: { 29 | 30 | select: function() { 31 | var self = this; 32 | $(".topology-d3").off('click'); 33 | $(".topology-d3").on('click', function(e) { 34 | var value, node; 35 | if (! e.target.__data__) { 36 | return; 37 | } else { 38 | node = value = e.target.__data__; 39 | } 40 | 41 | self.attr.split(".").forEach(function(key) { 42 | if (! value[key]) { 43 | return; 44 | } else { 45 | value = value[key]; 46 | } 47 | }); 48 | 49 | self.$emit('input', value); 50 | self.$emit('selected', node); 51 | e.preventDefault(); 52 | $(".topology-d3").off('click'); 53 | }); 54 | } 55 | 56 | } 57 | 58 | }); 59 | -------------------------------------------------------------------------------- /statics/js/components/object-detail.js: -------------------------------------------------------------------------------- 1 | /* jshint multistr: true */ 2 | 3 | Vue.component('object-detail', { 4 | 5 | name: 'object-detail', 6 | 7 | props: { 8 | 9 | object: { 10 | type: Object, 11 | required: true, 12 | } 13 | 14 | }, 15 | 16 | template: '\ 17 |
    \ 18 |
    \ 19 |
    \ 20 | {{key}}\ 21 | \ 22 |
    \ 23 |
    \ 24 | {{key}} :\ 25 | {{value}}\ 26 |
    \ 27 |
    \ 28 |
    \ 29 | ', 30 | 31 | }); 32 | -------------------------------------------------------------------------------- /statics/js/components/tabs.js: -------------------------------------------------------------------------------- 1 | /* jshint multistr: true */ 2 | 3 | Vue.component('tab-pane', { 4 | 5 | props: ['title'], 6 | 7 | template: '\ 8 |
    \ 11 |
    \ 12 |
    \ 13 | \ 14 |
    \ 15 |
    \ 16 |
    \ 17 | ', 18 | 19 | computed: { 20 | 21 | index: function() { 22 | return this.$parent.panes.indexOf(this); 23 | }, 24 | 25 | selected: function() { 26 | return this.index === this.$parent.selected; 27 | } 28 | 29 | }, 30 | 31 | created: function() { 32 | this.$parent.addPane(this); 33 | }, 34 | 35 | beforeDestroy: function() { 36 | this.$parent.removePane(this); 37 | }, 38 | 39 | }); 40 | 41 | Vue.component('tabs', { 42 | 43 | template: '\ 44 |
    \ 45 | \ 50 |
    \ 51 | \ 52 |
    \ 53 |
    \ 54 | ', 55 | 56 | data: function() { 57 | return { 58 | panes: [], 59 | selected: 0 60 | }; 61 | }, 62 | 63 | methods: { 64 | 65 | select: function(index) { 66 | this.selected = index; 67 | }, 68 | 69 | addPane: function(pane) { 70 | this.panes.push(pane); 71 | }, 72 | 73 | removePane: function(pane) { 74 | this.panes.splice(this.panes.indexOf(this), 1); 75 | }, 76 | 77 | } 78 | 79 | }); 80 | -------------------------------------------------------------------------------- /statics/js/lscache.min.js: -------------------------------------------------------------------------------- 1 | !function(a,b){"function"==typeof define&&define.amd?define([],b):"undefined"!=typeof module&&module.exports?module.exports=b():a.lscache=b()}(this,function(){function a(){var a="__lscachetest__",c=a;if(void 0!==n)return n;try{if(!localStorage)return!1}catch(d){return!1}try{h(a,c),i(a),n=!0}catch(e){n=b(e)&&localStorage.length?!0:!1}return n}function b(a){return a&&"QUOTA_EXCEEDED_ERR"===a.name||"NS_ERROR_DOM_QUOTA_REACHED"===a.name||"QuotaExceededError"===a.name?!0:!1}function c(){return void 0===o&&(o=null!=window.JSON),o}function d(a){return a.replace(/[[\]{}()*+?.\\^$|]/g,"\\$&")}function e(a){return a+q}function f(){return Math.floor((new Date).getTime()/s)}function g(a){return localStorage.getItem(p+u+a)}function h(a,b){localStorage.removeItem(p+u+a),localStorage.setItem(p+u+a,b)}function i(a){localStorage.removeItem(p+u+a)}function j(a){for(var b=new RegExp("^"+p+d(u)+"(.*)"),c=localStorage.length-1;c>=0;--c){var f=localStorage.key(c);f=f&&f.match(b),f=f&&f[1],f&&f.indexOf(q)<0&&a(f,e(f))}}function k(a){var b=e(a);i(a),i(b)}function l(a){var b=e(a),c=g(b);if(c){var d=parseInt(c,r);if(f()>=d)return i(a),i(b),!0}}function m(a,b){v&&"console"in window&&"function"==typeof window.console.warn&&(window.console.warn("lscache - "+a),b&&window.console.warn("lscache - The error was: "+b.message))}var 
n,o,p="lscache-",q="-cacheexpiration",r=10,s=6e4,t=Math.floor(864e13/s),u="",v=!1,w={set:function(d,l,n){if(a()){if("string"!=typeof l){if(!c())return;try{l=JSON.stringify(l)}catch(o){return}}try{h(d,l)}catch(o){if(!b(o))return void m("Could not add item with key '"+d+"'",o);var p,q=[];j(function(a,b){var c=g(b);c=c?parseInt(c,r):t,q.push({key:a,size:(g(a)||"").length,expiration:c})}),q.sort(function(a,b){return b.expiration-a.expiration});for(var s=(l||"").length;q.length&&s>0;)p=q.pop(),m("Cache is full, removing item with key '"+d+"'"),k(p.key),s-=p.size;try{h(d,l)}catch(o){return void m("Could not add item with key '"+d+"', perhaps it's too big?",o)}}n?h(e(d),(f()+n).toString(r)):i(e(d))}},get:function(b){if(!a())return null;if(l(b))return null;var d=g(b);if(!d||!c())return d;try{return JSON.parse(d)}catch(e){return d}},remove:function(b){a()&&k(b)},supported:function(){return a()},flush:function(){a()&&j(function(a){k(a)})},flushExpired:function(){a()&&j(function(a){l(a)})},setBucket:function(a){u=a},resetBucket:function(){u=""},enableWarnings:function(a){v=a}};return w}); 2 | -------------------------------------------------------------------------------- /statics/js/timeslider.js: -------------------------------------------------------------------------------- 1 | var timesliderLastTime = 0; 2 | var timesliderIntervalId; 3 | var slowMotionEffectId; 4 | function SetupTimeSlider() { 5 | var slowMotion = function() { 6 | $('body').addClass('slowmotion-effect-in'); 7 | $('body').removeClass('slowmotion-effect-out'); 8 | if (slowMotionEffectId) { 9 | clearTimeout(slowMotionEffectId); 10 | } 11 | slowMotionEffectId = setTimeout(function(){ 12 | $('body').removeClass('slowmotion-effect-in'); 13 | $('body').addClass('slowmotion-effect-out'); 14 | }, 1000); 15 | }; 16 | 17 | var changeTime = function() { 18 | var time = new Date(); 19 | time.setMinutes(time.getMinutes() + slider.getValue()); 20 | time.setSeconds(0); 21 | 22 | var at = time.getTime(); 23 | 24 | if (at 
== timesliderLastTime) 25 | return; 26 | 27 | topologyLayout.SyncRequest(at); 28 | 29 | if (timesliderIntervalId) { 30 | clearInterval(timesliderIntervalId); 31 | timesliderIntervalId = null; 32 | } 33 | 34 | timesliderIntervalId = setInterval(function() { 35 | slider.setValue(slider.getValue() - 1); 36 | }, 60 * 1000); 37 | 38 | timesliderLastTime = at; 39 | }; 40 | 41 | var slider = new Slider("#timeslider", { 42 | tooltip: 'always', 43 | formatter: function(value) { 44 | return -value + ' min. ago'; 45 | } 46 | }) 47 | .on('slide', function() { 48 | slowMotion(); 49 | }) 50 | .on('change', function() { 51 | if (!slider.isEnabled()) 52 | return; 53 | 54 | slowMotion(); 55 | changeTime(); 56 | }); 57 | 58 | $("[name='live-switch']").bootstrapSwitch({ 59 | onSwitchChange: function(event, state) { 60 | if (state && topologyLayout.live === false) { 61 | topologyLayout.SyncRequest(null); 62 | } 63 | 64 | if (state) { 65 | slider.disable(); 66 | slider.setValue(0); 67 | 68 | if (timesliderIntervalId) { 69 | clearInterval(timesliderIntervalId); 70 | timesliderIntervalId = null; 71 | } 72 | 73 | $(".flow-ops-panel").show(); 74 | } 75 | else { 76 | slider.enable(); 77 | 78 | changeTime(); 79 | 80 | $(".flow-ops-panel").hide(); 81 | } 82 | 83 | topologyLayout.live = state; 84 | return true; 85 | } 86 | }); 87 | $("[name='live-switch']").bootstrapSwitch('state', true, false); 88 | } 89 | -------------------------------------------------------------------------------- /statics/js/topology.js: -------------------------------------------------------------------------------- 1 | var TopologyAPI = { 2 | 3 | query: function(gremlinQuery) { 4 | return $.ajax({ 5 | dataType: "json", 6 | url: '/api/topology', 7 | data: JSON.stringify({"GremlinQuery": gremlinQuery}), 8 | contentType: "application/json; charset=utf-8", 9 | method: 'POST', 10 | }) 11 | .then(function(data) { 12 | if (data === null) 13 | return []; 14 | // Result can be [Node] or [[Node, Node]] 15 | if (data.length > 0 
&& data[0] instanceof Array) 16 | data = data[0]; 17 | return data; 18 | }); 19 | } 20 | }; 21 | -------------------------------------------------------------------------------- /statics/js/utils.js: -------------------------------------------------------------------------------- 1 | 2 | function debounce(func, wait, immediate) { 3 | var timeout; 4 | return function() { 5 | var context = this, args = arguments; 6 | var later = function() { 7 | timeout = null; 8 | if (!immediate) func.apply(context, args); 9 | }; 10 | var callNow = immediate && !timeout; 11 | clearTimeout(timeout); 12 | timeout = setTimeout(later, wait); 13 | if (callNow) func.apply(context, args); 14 | }; 15 | } 16 | -------------------------------------------------------------------------------- /statics/js/vue-sidebar.js: -------------------------------------------------------------------------------- 1 | var VueSidebar = { 2 | el: "#vue-sidebar", 3 | 4 | data: { 5 | service: null, 6 | currentNode: null, 7 | }, 8 | 9 | created: function() { 10 | var self = this; 11 | this.$on('NODE_SELECTED', function(node) { 12 | self.currentNode = node; 13 | }); 14 | this.$on('NODE_DELETED', function(node) { 15 | if (self.currentNode && node.ID === self.currentNode.ID) { 16 | self.currentNode = null; 17 | } 18 | }); 19 | }, 20 | 21 | computed: { 22 | 23 | currentNodeFlowsQuery: function() { 24 | if (this.currentNode) 25 | return "G.V('" + this.currentNode.ID + "').Flows().Dedup()"; 26 | return ""; 27 | }, 28 | 29 | } 30 | 31 | }; 32 | -------------------------------------------------------------------------------- /statics/login.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Signin to Skydive 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 29 | 30 | 31 | 32 | 45 | 46 |
    47 | 55 |
    56 | 57 | 58 | -------------------------------------------------------------------------------- /tests/pcaptraces/eth-ip4-arp-dns-req-http-google.pcap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cip/skydive/ca2e15546e98f7768f71dcebf0e0ff4d6315242c/tests/pcaptraces/eth-ip4-arp-dns-req-http-google.pcap -------------------------------------------------------------------------------- /tests/tests.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package tests 24 | -------------------------------------------------------------------------------- /topology/graph/filters.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package graph 24 | 25 | import ( 26 | "time" 27 | 28 | "github.com/skydive-project/skydive/common" 29 | "github.com/skydive-project/skydive/filters" 30 | ) 31 | 32 | func NewFilterForMetadata(m Metadata) (*filters.Filter, error) { 33 | var termFilters []*filters.Filter 34 | for k, v := range m { 35 | switch v := v.(type) { 36 | case *filters.Filter: 37 | termFilters = append(termFilters, v) 38 | case int64: 39 | termFilters = append(termFilters, filters.NewTermInt64Filter(k, v)) 40 | case string: 41 | termFilters = append(termFilters, filters.NewTermStringFilter(k, v)) 42 | default: 43 | i, err := common.ToInt64(v) 44 | if err != nil { 45 | return nil, err 46 | } 47 | termFilters = append(termFilters, filters.NewTermInt64Filter(k, i)) 48 | } 49 | } 50 | return filters.NewAndFilter(termFilters...), nil 51 | } 52 | 53 | func NewFilterForEdge(parent Identifier, child Identifier) *filters.Filter { 54 | return filters.NewOrFilter( 55 | filters.NewTermStringFilter("Parent", string(parent)), 56 | filters.NewTermStringFilter("Child", string(child)), 57 | ) 58 | } 59 | 60 | func NewFilterForTimeSlice(t *common.TimeSlice) *filters.Filter { 61 | if t == nil { 62 | return NewFilterForTime(time.Now()) 63 | } 64 | 65 | return filters.NewAndFilter( 66 | filters.NewLteInt64Filter("CreatedAt", t.Last), 67 | filters.NewOrFilter( 68 | 
filters.NewTermInt64Filter("DeletedAt", 0), 69 | filters.NewGteInt64Filter("DeletedAt", t.Start), 70 | ), 71 | ) 72 | } 73 | 74 | func NewFilterForTime(t time.Time) *filters.Filter { 75 | u := t.UTC().Unix() 76 | return NewFilterForTimeSlice(common.NewTimeSlice(u, u)) 77 | } 78 | -------------------------------------------------------------------------------- /topology/graph/memory_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package graph 24 | 25 | import ( 26 | "testing" 27 | ) 28 | 29 | func TestAddEdgeMissingNode(t *testing.T) { 30 | b, err := NewMemoryBackend() 31 | if err != nil { 32 | t.Error(err.Error()) 33 | } 34 | 35 | e := &Edge{ 36 | parent: "aaa", 37 | child: "bbb", 38 | graphElement: graphElement{ 39 | ID: GenID(), 40 | host: "host", 41 | }, 42 | } 43 | 44 | if b.AddEdge(e) { 45 | t.Error("Edge inserted with missing nodes") 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /topology/graph/message.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package graph 24 | 25 | import ( 26 | "bytes" 27 | "encoding/json" 28 | 29 | "github.com/skydive-project/skydive/common" 30 | shttp "github.com/skydive-project/skydive/http" 31 | ) 32 | 33 | func UnmarshalWSMessage(msg shttp.WSMessage) (string, interface{}, error) { 34 | var obj interface{} 35 | if err := common.JsonDecode(bytes.NewReader([]byte(*msg.Obj)), &obj); err != nil { 36 | return "", msg, err 37 | } 38 | 39 | switch msg.Type { 40 | case "SyncRequest": 41 | m := obj.(map[string]interface{}) 42 | var context GraphContext 43 | switch v := m["Time"].(type) { 44 | case json.Number: 45 | i, err := v.Int64() 46 | if err != nil { 47 | return "", msg, err 48 | } 49 | context.TimeSlice = common.NewTimeSlice(i/1000, i/1000) 50 | } 51 | return msg.Type, context, nil 52 | 53 | case "HostGraphDeleted": 54 | return msg.Type, obj, nil 55 | case "NodeUpdated", "NodeDeleted", "NodeAdded": 56 | var node Node 57 | if err := node.Decode(obj); err != nil { 58 | return "", msg, err 59 | } 60 | 61 | return msg.Type, &node, nil 62 | case "EdgeUpdated", "EdgeDeleted", "EdgeAdded": 63 | var edge Edge 64 | if err := edge.Decode(obj); err != nil { 65 | return "", msg, err 66 | } 67 | 68 | return msg.Type, &edge, nil 69 | } 70 | 71 | return "", msg, nil 72 | } 73 | -------------------------------------------------------------------------------- /topology/graph/traversal/traversal_extension.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package traversal 24 | 25 | type GremlinTraversalExtension interface { 26 | ScanIdent(s string) (Token, bool) 27 | ParseStep(t Token, p GremlinTraversalContext) (GremlinTraversalStep, error) 28 | } 29 | -------------------------------------------------------------------------------- /topology/probes/peering.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 
20 | * 21 | */ 22 | 23 | package probes 24 | 25 | import ( 26 | "github.com/skydive-project/skydive/logging" 27 | "github.com/skydive-project/skydive/topology/graph" 28 | ) 29 | 30 | type PeeringProbe struct { 31 | graph.DefaultGraphListener 32 | graph *graph.Graph 33 | peers map[string]*graph.Node 34 | } 35 | 36 | func (p *PeeringProbe) onNodeEvent(n *graph.Node) { 37 | if mac, ok := n.Metadata()["MAC"]; ok { 38 | if node, ok := p.peers[mac.(string)]; ok { 39 | if !p.graph.AreLinked(node, n, layer2Metadata) { 40 | p.graph.Link(node, n, layer2Metadata) 41 | } 42 | return 43 | } 44 | } 45 | if mac, ok := n.Metadata()["PeerIntfMAC"]; ok { 46 | nodes := p.graph.GetNodes(graph.Metadata{"MAC": mac}) 47 | switch len(nodes) { 48 | case 1: 49 | if !p.graph.AreLinked(n, nodes[0], layer2Metadata) { 50 | p.graph.Link(n, nodes[0], layer2Metadata) 51 | } 52 | fallthrough 53 | case 0: 54 | p.peers[mac.(string)] = n 55 | default: 56 | logging.GetLogger().Errorf("Multiple peer MAC found: %s", mac.(string)) 57 | } 58 | 59 | } 60 | } 61 | 62 | func (p *PeeringProbe) OnNodeUpdated(n *graph.Node) { 63 | p.onNodeEvent(n) 64 | } 65 | 66 | func (p *PeeringProbe) OnNodeAdded(n *graph.Node) { 67 | p.onNodeEvent(n) 68 | } 69 | 70 | func (p *PeeringProbe) OnNodeDeleted(n *graph.Node) { 71 | for mac, node := range p.peers { 72 | if n.ID == node.ID { 73 | delete(p.peers, mac) 74 | } 75 | } 76 | } 77 | 78 | func (p *PeeringProbe) Start() { 79 | } 80 | 81 | func (p *PeeringProbe) Stop() { 82 | p.graph.RemoveEventListener(p) 83 | } 84 | 85 | func NewPeeringProbe(g *graph.Graph) *PeeringProbe { 86 | probe := &PeeringProbe{ 87 | graph: g, 88 | peers: make(map[string]*graph.Node), 89 | } 90 | g.AddEventListener(probe) 91 | 92 | return probe 93 | } 94 | -------------------------------------------------------------------------------- /topology/topology_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 
3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package topology 24 | 25 | import ( 26 | "testing" 27 | 28 | "github.com/skydive-project/skydive/topology/graph" 29 | ) 30 | 31 | func newGraph(t *testing.T) *graph.Graph { 32 | b, err := graph.NewMemoryBackend() 33 | if err != nil { 34 | t.Error(err.Error()) 35 | } 36 | 37 | return graph.NewGraphFromConfig(b) 38 | } 39 | 40 | func TestMarshal(t *testing.T) { 41 | g := newGraph(t) 42 | 43 | n1 := g.NewNode(graph.GenID(), graph.Metadata{"Name": "N1", "Type": "T1"}) 44 | n2 := g.NewNode(graph.GenID(), graph.Metadata{"Name": "N2", "Type": "T2"}) 45 | n3 := g.NewNode(graph.GenID(), graph.Metadata{"Name": "N3", "Type": "T3"}) 46 | 47 | g.Link(n1, n2, nil) 48 | g.Link(n2, n3, nil) 49 | 50 | r := g.LookupShortestPath(n3, graph.Metadata{"Name": "N1"}, nil) 51 | if len(r) == 0 { 52 | t.Errorf("Wrong nodes returned: %v", r) 53 | } 54 | 55 | path := NodePath(r).Marshal() 56 | if path != "N1[Type=T1]/N2[Type=T2]/N3[Type=T3]" { 57 | t.Errorf("Wrong path returned: %s", path) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /topology/topology_traversal_test.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package topology 24 | 25 | import ( 26 | "strings" 27 | "testing" 28 | 29 | "github.com/skydive-project/skydive/topology/graph" 30 | "github.com/skydive-project/skydive/topology/graph/traversal" 31 | ) 32 | 33 | func TestGraphPathTraversal(t *testing.T) { 34 | g := newGraph(t) 35 | 36 | n1 := g.NewNode(graph.GenID(), graph.Metadata{"Type": "host", "Name": "localhost"}) 37 | n2 := g.NewNode(graph.GenID(), graph.Metadata{"Name": "N2", "Type": "T2"}) 38 | n3 := g.NewNode(graph.GenID(), graph.Metadata{"Name": "N3", "Type": "T3"}) 39 | 40 | g.Link(n1, n2, graph.Metadata{"RelationType": "ownership"}) 41 | g.Link(n2, n3, graph.Metadata{"RelationType": "ownership"}) 42 | 43 | query := `G.V().Has("Name", "N3").GraphPath()` 44 | 45 | tp := traversal.NewGremlinTraversalParser(g) 46 | tp.AddTraversalExtension(NewTopologyTraversalExtension()) 47 | 48 | ts, err := tp.Parse(strings.NewReader(query)) 49 | if err != nil { 50 | t.Fatal(err.Error()) 51 | } 52 | 53 | res, err := ts.Exec() 54 | 
if err != nil { 55 | t.Fatal(err.Error()) 56 | } 57 | 58 | if len(res.Values()) != 1 || res.Values()[0].(string) != "localhost[Type=host]/N2[Type=T2]/N3[Type=T3]" { 59 | t.Fatalf("Should return 1 path, returned: %v", res.Values()) 60 | } 61 | } 62 | 63 | func TestRegexPredicate(t *testing.T) { 64 | g := newGraph(t) 65 | g.NewNode(graph.GenID(), graph.Metadata{"Type": "host", "Name": "localhost"}) 66 | 67 | query := `G.V().Has("Name", Regex("^local.*st$")).Count()` 68 | 69 | tp := traversal.NewGremlinTraversalParser(g) 70 | tp.AddTraversalExtension(NewTopologyTraversalExtension()) 71 | 72 | ts, err := tp.Parse(strings.NewReader(query)) 73 | if err != nil { 74 | t.Fatal(err.Error()) 75 | } 76 | 77 | res, err := ts.Exec() 78 | if err != nil { 79 | t.Fatal(err.Error()) 80 | } 81 | 82 | if len(res.Values()) != 1 || res.Values()[0].(int) != 1 { 83 | t.Fatalf("Regex should exactly match 1 node, returned: %v", res.Values()) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /validator/validator.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Red Hat, Inc. 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, 15 | * software distributed under the License is distributed on an 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | * KIND, either express or implied. 
See the License for the 18 | * specific language governing permissions and limitations 19 | * under the License. 20 | * 21 | */ 22 | 23 | package validator 24 | 25 | import ( 26 | "errors" 27 | "fmt" 28 | "net" 29 | "strings" 30 | 31 | valid "gopkg.in/validator.v2" 32 | 33 | ftraversal "github.com/skydive-project/skydive/flow/traversal" 34 | "github.com/skydive-project/skydive/topology" 35 | "github.com/skydive-project/skydive/topology/graph" 36 | "github.com/skydive-project/skydive/topology/graph/traversal" 37 | ) 38 | 39 | type Validator interface { 40 | Validate() error 41 | } 42 | 43 | var ( 44 | skydiveValidator = valid.NewValidator() 45 | 46 | IPNotValid = func() error { 47 | return valid.TextErr{Err: errors.New("Not a IP addr")} 48 | } 49 | GremlinNotValid = func(err error) error { 50 | return valid.TextErr{Err: fmt.Errorf("Not a valid Gremlin expression: %s", err.Error())} 51 | } 52 | ) 53 | 54 | func isIP(v interface{}, param string) error { 55 | ip, ok := v.(string) 56 | if !ok { 57 | return IPNotValid() 58 | } 59 | /* Parse/Check IPv4 and IPv6 address */ 60 | if n := net.ParseIP(ip); n == nil { 61 | return IPNotValid() 62 | } 63 | return nil 64 | } 65 | 66 | func isGremlinExpr(v interface{}, param string) error { 67 | query, ok := v.(string) 68 | if !ok { 69 | return GremlinNotValid(errors.New("not a string")) 70 | } 71 | 72 | tr := traversal.NewGremlinTraversalParser(&graph.Graph{}) 73 | tr.AddTraversalExtension(topology.NewTopologyTraversalExtension()) 74 | tr.AddTraversalExtension(ftraversal.NewFlowTraversalExtension(nil, nil)) 75 | 76 | if _, err := tr.Parse(strings.NewReader(query)); err != nil { 77 | return GremlinNotValid(err) 78 | } 79 | 80 | return nil 81 | } 82 | 83 | func Validate(v interface{}) error { 84 | if err := skydiveValidator.Validate(v); err != nil { 85 | return err 86 | } 87 | 88 | if obj, ok := v.(Validator); ok { 89 | return obj.Validate() 90 | } 91 | 92 | return nil 93 | } 94 | 95 | func init() { 96 | 
skydiveValidator.SetValidationFunc("isIP", isIP) 97 | skydiveValidator.SetValidationFunc("isGremlinExpr", isGremlinExpr) 98 | skydiveValidator.SetTag("valid") 99 | } 100 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | ) 8 | 9 | // Package is the overall, canonical project import path under which the 10 | // package was built. 11 | var Package = "github.com/skydive-project/skydive" 12 | 13 | // Version indicates which version of the binary is running. 14 | var Version = "v0.9.0" 15 | 16 | // FprintVersion outputs the version string to the writer, in the following 17 | // format, followed by a newline: 18 | // 19 | // 20 | // 21 | // For example, a binary "registry" built from github.com/docker/distribution 22 | // with version "v1.0.0" would print the following: 23 | // 24 | // skydive_agent github.com/skydive-project/skydive v1.0.0 25 | // 26 | func FprintVersion(w io.Writer) { 27 | fmt.Fprintln(w, os.Args[0], Package, Version) 28 | } 29 | 30 | // PrintVersion outputs the version information, from Fprint, to stdout. 31 | func PrintVersion() { 32 | FprintVersion(os.Stdout) 33 | } 34 | --------------------------------------------------------------------------------