<int:id>', methods=['PUT'])
71 | def edit_device(id):
72 | device = Device.query.get_or_404(id)
73 | device.import_data(request.json)
74 | db.session.add(device)
75 | db.session.commit()
76 | return jsonify({})
77 |
78 |
79 | if __name__ == '__main__':
80 | db.create_all()
81 | app.run(host='0.0.0.0', debug=True)
82 |
83 |
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
/Chapter05/Vaults/host_vars/localhost:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 66366365366233396161616631653930616330303834343665613237653733353939326238313035
3 | 3065643838646562386239666336323233313262663538350a656135303031313239353731626234
4 | 30656432326130613566353364393936636135633335303332363031653964356563393235373032
5 | 6130363661393330650a303163666637333766363463393766613862626665376338353661663464
6 | 35373765313730366330383036366233316335363165333163623330643262306135323834623736
7 | 61613337323836623364653631316533383236643865643435633365633731633836353130643633
8 | 39633034356638396530643232323235613931663136343630636261306335653638366133636534
9 | 31663038303230623336333537326338326330313866656434663736336330623361653239623264
10 | 34336465636366363533373533306564343437633733613439613537633730303435393436303931
11 | 66393164306362646562326564393036653764653962363531333331616563663264383965306234
12 | 39636361353536373637613233353864633938346361386430396639313431323534666362373633
13 | 31633664306538353665323933656237663734633461656363393638343430646132646638356334
14 | 65313535356432386263616334393531386464346564663230363531373431346565343265333736
15 | 64343964346332346133633236613065336565363134633139303137333535353562383139303437
16 | 38626262336338313437303563323634366430626438303733386133373134336632643661303636
17 | 66326639383539363239663136613166366232383131363032663263663639336237663735623530
18 | 36333161356233363261326266363631303237633037623161323738383763323631333030366135
19 | 34313032656662636536636134363436663438376164623031323330626638656234303665626537
20 | 33396330613333363763306131393365386131663638373165663031356133366530326464373062
21 | 62386130636262666164323331666535386266663863633239343330393738393235383830346130
22 | 35613738363464343866323638303432306638313031663663623266396533613634306561363330
23 | 62393534633839656262653635363332653033623730313936366236663161376139616237636464
24 | 63353136613332313465666635643962336464376565383065363061363961353735636666316134
25 | 66316435663434663764333433316331336335613732633465383537393436356335623064636335
26 | 35353162643964373562356262366536336139343537336536323435623036363366306538386236
27 | 66346631666364353661616261346133623061386134376330353433363961396636656433623563
28 | 39343066323539313035396537616634643430636163646562306332656266613466336638303264
29 | 36333732613531646135666637323966616435303462303031323562393364393832373166653038
30 | 38646532623734353732366164333263636133646431393931356430313837393966396433323230
31 | 35323833333231373365396164623233313632663166366231373037363637623336666632363266
32 | 63306534643138343961353435393234346235666465616438623533313862313536653962333064
33 | 35663833646263343766313131306230663666393732323134613730663436393066356534616461
34 | 37326236656339373735323738633239356431636538663236616130653964626437373061303735
35 | 3561356666653666316639633831333564393438316237356330
36 |
--------------------------------------------------------------------------------
/Chapter13/chapter13_jsonrpc_1.py:
--------------------------------------------------------------------------------
1 | # Based on ryu/services/protocols/bgp/api/jsonrpc.py
2 |
3 | from ryu.base import app_manager
4 | from ryu.lib import hub
5 | from ryu.app.wsgi import websocket, ControllerBase, WSGIApplication
6 | from ryu.app.wsgi import rpc_public, WebSocketRPCServer
7 | from ryu.services.protocols.bgp.api.base import call
8 | from ryu.services.protocols.bgp.api.base import PREFIX
9 | from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
10 | from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID
11 | from ryu.services.protocols.bgp.rtconf import neighbors
12 |
13 |
14 | bgp_instance_name = 'bgp_api_app'
15 | url = '/bgp/ws'
16 |
17 |
18 | class BgpWSJsonRpc(app_manager.RyuApp):
19 | _CONTEXTS = {
20 | 'wsgi': WSGIApplication,
21 | }
22 |
23 | def __init__(self, *args, **kwargs):
24 | super(BgpWSJsonRpc, self).__init__(*args, **kwargs)
25 |
26 | wsgi = kwargs['wsgi']
27 | wsgi.register(
28 | BgpWSJsonRpcController,
29 | data={bgp_instance_name: self},
30 | )
31 | self._ws_manager = wsgi.websocketmanager
32 |
33 | @rpc_public('core.start')
34 | def _core_start(self, as_number=64512, router_id='10.0.0.1'):
35 | common_settings = {}
36 | common_settings[LOCAL_AS] = as_number
37 | common_settings[ROUTER_ID] = str(router_id)
38 | waiter = hub.Event()
39 | call('core.start', waiter=waiter, **common_settings)
40 | waiter.wait()
41 | return {}
42 |
43 | @rpc_public('neighbor.create')
44 | def _neighbor_create(self, ip_address='192.168.177.32',
45 | remote_as=64513, is_route_reflector_client=False):
46 | bgp_neighbor = {}
47 | bgp_neighbor[neighbors.IP_ADDRESS] = str(ip_address)
48 | bgp_neighbor[neighbors.REMOTE_AS] = remote_as
49 | bgp_neighbor[neighbors.IS_ROUTE_REFLECTOR_CLIENT] = bool(is_route_reflector_client)
50 | call('neighbor.create', **bgp_neighbor)
51 | return {}
52 |
53 | @rpc_public('network.add')
54 | def _prefix_add(self, prefix='10.20.0.0/24'):
55 | networks = {}
56 | networks[PREFIX] = str(prefix)
57 | call('network.add', **networks)
58 | return {}
59 |
60 | @rpc_public('neighbors.get')
61 | def _neighbors_get(self):
62 | return call('neighbors.get')
63 |
64 | @rpc_public('show.rib')
65 | def _show_rib(self, family='ipv4'):
66 | show = {}
67 | show['params'] = ['rib', family]
68 | return call('operator.show', **show)
69 |
70 |
71 | class BgpWSJsonRpcController(ControllerBase):
72 | def __init__(self, req, link, data, **config):
73 | super(BgpWSJsonRpcController, self).__init__(
74 | req, link, data, **config)
75 | self.bgp_api_app = data[bgp_instance_name]
76 |
77 | @websocket('bgp', url)
78 | def _websocket_handler(self, ws):
79 | rpc_server = WebSocketRPCServer(ws, self.bgp_api_app)
80 | rpc_server.serve_forever()
81 |
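Note: the app above only registers the RPC methods; a client still has to open the websocket at /bgp/ws and send JSON-RPC requests over it. Below is a minimal client sketch, not taken from the book: it assumes the Ryu WSGI server is listening on its default port 8080, that the websocket-client package is installed, and that the endpoint accepts standard JSON-RPC 2.0 requests with named parameters.

```
import json
from websocket import create_connection   # pip install websocket-client

# Open the websocket registered by BgpWSJsonRpcController at url = '/bgp/ws'
ws = create_connection('ws://127.0.0.1:8080/bgp/ws')

# Call the method registered above as rpc_public('core.start')
request = {
    'jsonrpc': '2.0',
    'id': 1,
    'method': 'core.start',
    'params': {'as_number': 64512, 'router_id': '10.0.0.1'},
}
ws.send(json.dumps(request))
print(ws.recv())   # the JSON-RPC response produced by _core_start()
ws.close()
```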
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | # Mastering Python Networking
5 | This is the code repository for [Mastering Python Networking](https://www.packtpub.com/networking-and-servers/mastering-python-networking?utm_source=github&utm_medium=repository&utm_campaign=9781784397005), published by [Packt](https://www.packtpub.com/?utm_source=github). It contains all the supporting project files necessary to work through the book from start to finish.
6 | ## About the Book
7 | This book begins with a review of the TCP/IP protocol suite and a refresher of the core elements of the Python language. Next, you will start using Python and supported libraries to automate network tasks from the current major network vendors. We will look at automating traditional network devices based on the command-line interface, as well as newer devices with API support, with hands-on labs. We will then learn the concepts and practical use cases of the Ansible framework in order to achieve your network goals.
8 |
9 | ## Errata
10 | * Page 42 (2nd bullet point): **It is also the popular kid on the bock** _should be_ **It is also the popular kid on the block**
11 |
12 | ## Instructions and Navigation
13 | All of the code is organized into folders. Each folder starts with a number followed by the application name. For example, Chapter02.
14 |
15 | Chapter 12 does not have any code files.
16 |
17 | The code will look like the following:
18 | ```
19 | # This is a comment
20 | print("hello world")
21 | ```
22 |
23 | It is strongly recommended that you follow and practice the examples given in this book. To complete the examples, you will need a host machine that runs Python 2.7 and 3.4, with enough administrative permissions to install the tools introduced in the book. The host machine can be a virtual machine and should preferably run a flavor of Linux. In the book, we'll use Ubuntu 16.04, but other Linux distributions should work as well. You might need to tweak your settings accordingly. In addition, either physical or virtual network equipment is needed to test your code.
24 |
25 | ## Related Products
26 | * [Python: Master the Art of Design Patterns](https://www.packtpub.com/application-development/python-master-art-design-patterns?utm_source=github&utm_medium=repository&utm_campaign=9781787125186)
27 |
28 | * [Neural Network Programming with Python](https://www.packtpub.com/big-data-and-business-intelligence/neural-network-programming-python?utm_source=github&utm_medium=repository&utm_campaign=9781784398217)
29 |
30 | * [Mastering Python - Second Edition [Video]](https://www.packtpub.com/application-development/mastering-python-second-edition-video?utm_source=github&utm_medium=repository&utm_campaign=9781786463746)
31 |
32 | ### Download a free PDF
33 |
34 | If you have already purchased a print or Kindle version of this book, you can get a DRM-free PDF version at no cost. Simply click on the link to claim your free PDF.
35 | https://packt.link/free-ebook/9781784397005
--------------------------------------------------------------------------------
/Chapter09/chapter9_7.py:
--------------------------------------------------------------------------------
1 | # This example referenced Miguel Grinberg's code on Github:
2 | # https://github.com/miguelgrinberg/oreilly-flask-apis-video/commit/98855d48f52f4dc0f9728c841bdd0645810d708e
3 | #
4 |
5 | from flask import Flask, url_for, jsonify, request
6 | from flask_sqlalchemy import SQLAlchemy
7 | from chapter9_pexpect_1 import show_version
8 |
9 | app = Flask(__name__)
10 | app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///network.db'
11 | db = SQLAlchemy(app)
12 |
13 | class ValidationError(ValueError):
14 | pass
15 |
16 |
17 | class Device(db.Model):
18 | __tablename__ = 'devices'
19 | id = db.Column(db.Integer, primary_key=True)
20 | hostname = db.Column(db.String(64), unique=True)
21 | loopback = db.Column(db.String(120), unique=True)
22 | mgmt_ip = db.Column(db.String(120), unique=True)
23 | role = db.Column(db.String(64))
24 | vendor = db.Column(db.String(64))
25 | os = db.Column(db.String(64))
26 |
27 | def get_url(self):
28 | return url_for('get_device', id=self.id, _external=True)
29 |
30 | def export_data(self):
31 | return {
32 | 'self_url': self.get_url(),
33 | 'hostname': self.hostname,
34 | 'loopback': self.loopback,
35 | 'mgmt_ip': self.mgmt_ip,
36 | 'role': self.role,
37 | 'vendor': self.vendor,
38 | 'os': self.os
39 | }
40 |
41 | def import_data(self, data):
42 | try:
43 | self.hostname = data['hostname']
44 | self.loopback = data['loopback']
45 | self.mgmt_ip = data['mgmt_ip']
46 | self.role = data['role']
47 | self.vendor = data['vendor']
48 | self.os = data['os']
49 | except KeyError as e:
50 | raise ValidationError('Invalid device: missing ' + e.args[0])
51 | return self
52 |
53 |
54 | @app.route('/devices/', methods=['GET'])
55 | def get_devices():
56 | return jsonify({'device': [device.get_url()
57 | for device in Device.query.all()]})
58 |
59 | @app.route('/devices/<int:id>', methods=['GET'])
60 | def get_device(id):
61 | return jsonify(Device.query.get_or_404(id).export_data())
62 |
63 |
64 | @app.route('/devices/<int:id>/version', methods=['GET'])
65 | def get_device_version(id):
66 | device = Device.query.get_or_404(id)
67 | hostname = device.hostname
68 | ip = device.mgmt_ip
69 | prompt = hostname+"#"
70 | result = show_version(hostname, prompt, ip, 'cisco', 'cisco')
71 | return jsonify({"version": str(result)})
72 |
73 | @app.route('/devices/<device_role>/version', methods=['GET'])
74 | def get_role_version(device_role):
75 | device_id_list = [device.id for device in Device.query.all() if device.role == device_role]
76 | result = {}
77 | for id in device_id_list:
78 | device = Device.query.get_or_404(id)
79 | hostname = device.hostname
80 | ip = device.mgmt_ip
81 | prompt = hostname + "#"
82 | device_result = show_version(hostname, prompt, ip, 'cisco', 'cisco')
83 | result[hostname] = str(device_result)
84 | return jsonify(result)
85 |
86 | @app.route('/devices/', methods=['POST'])
87 | def new_device():
88 | device = Device()
89 | device.import_data(request.json)
90 | db.session.add(device)
91 | db.session.commit()
92 | return jsonify({}), 201, {'Location': device.get_url()}
93 |
94 | @app.route('/devices/<int:id>', methods=['PUT'])
95 | def edit_device(id):
96 | device = Device.query.get_or_404(id)
97 | device.import_data(request.json)
98 | db.session.add(device)
99 | db.session.commit()
100 | return jsonify({})
101 |
102 |
103 | if __name__ == '__main__':
104 | db.create_all()
105 | app.run(host='0.0.0.0', debug=True)
106 |
107 |
108 |
109 |
110 |
111 |
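Note: once this file is running (python chapter9_7.py), the API can be exercised from another Python session. The sketch below is illustrative only; it assumes the server is reachable on Flask's default port 5000, and the device field values are made up.

```
import requests

base = 'http://127.0.0.1:5000'
device = {
    'hostname': 'iosv-1',
    'loopback': '192.168.0.1',
    'mgmt_ip': '172.16.1.225',
    'role': 'spine',
    'vendor': 'Cisco',
    'os': '15.6',
}

# new_device() returns 201 with the new device URL in the Location header
r = requests.post(base + '/devices/', json=device)
print(r.status_code, r.headers.get('Location'))

# get_devices() lists device URLs; get_device() exports a single entry
print(requests.get(base + '/devices/').json())
print(requests.get(base + '/devices/1').json())
```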
--------------------------------------------------------------------------------
/Chapter11/Chapter11_2.py:
--------------------------------------------------------------------------------
1 | from ryu.base import app_manager
2 | from ryu.controller import ofp_event
3 | from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
4 | from ryu.controller.handler import set_ev_cls
5 | from ryu.ofproto import ofproto_v1_3
6 | from ryu.lib.packet import packet
7 | import array
8 | from ryu.lib.packet import ethernet
9 | from ryu.lib.packet import ether_types
10 | from ryu.lib.packet import ipv4, tcp
11 |
12 |
13 | class SimpleSwitch13(app_manager.RyuApp):
14 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
15 |
16 | def __init__(self, *args, **kwargs):
17 | super(SimpleSwitch13, self).__init__(*args, **kwargs)
18 | self.mac_to_port = {}
19 |
20 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
21 | def switch_features_handler(self, ev):
22 | datapath = ev.msg.datapath
23 | ofproto = datapath.ofproto
24 | parser = datapath.ofproto_parser
25 |
26 | match = parser.OFPMatch()
27 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
28 | ofproto.OFPCML_NO_BUFFER)]
29 | self.add_flow(datapath, 0, match, actions)
30 |
31 | def add_flow(self, datapath, priority, match, actions, buffer_id=None):
32 | ofproto = datapath.ofproto
33 | parser = datapath.ofproto_parser
34 |
35 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
36 | actions)]
37 | if buffer_id:
38 | mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
39 | priority=priority, match=match,
40 | instructions=inst)
41 | else:
42 | mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
43 | match=match, instructions=inst)
44 | datapath.send_msg(mod)
45 |
46 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
47 | def _packet_in_handler(self, ev):
48 |
49 | # This is where we decode and print out the packet
50 | print("msg.data: {}".format(array.array('B', ev.msg.data)))
51 | pkt = packet.Packet(ev.msg.data)
52 | for p in pkt.protocols:
53 | print(p.protocol_name, p)
54 | if p.protocol_name == 'ipv4':
55 | print('IP: src {} dst {}'.format(p.src, p.dst))
56 |
57 | # Below is the original code from simple_switch_13.py
58 | msg = ev.msg
59 | datapath = msg.datapath
60 | ofproto = datapath.ofproto
61 | parser = datapath.ofproto_parser
62 | in_port = msg.match['in_port']
63 |
64 | pkt = packet.Packet(msg.data)
65 | eth = pkt.get_protocols(ethernet.ethernet)[0]
66 |
67 | if eth.ethertype == ether_types.ETH_TYPE_LLDP:
68 | # ignore lldp packet
69 | return
70 | dst = eth.dst
71 | src = eth.src
72 |
73 | dpid = datapath.id
74 | self.mac_to_port.setdefault(dpid, {})
75 |
76 | self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
77 |
78 | # learn a mac address to avoid FLOOD next time.
79 | self.mac_to_port[dpid][src] = in_port
80 |
81 | if dst in self.mac_to_port[dpid]:
82 | out_port = self.mac_to_port[dpid][dst]
83 | else:
84 | out_port = ofproto.OFPP_FLOOD
85 |
86 | actions = [parser.OFPActionOutput(out_port)]
87 |
88 | # install a flow to avoid packet_in next time
89 | if out_port != ofproto.OFPP_FLOOD:
90 | match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
91 | # verify if we have a valid buffer_id, if yes avoid to send both
92 | # flow_mod & packet_out
93 | if msg.buffer_id != ofproto.OFP_NO_BUFFER:
94 | self.add_flow(datapath, 1, match, actions, msg.buffer_id)
95 | return
96 | else:
97 | self.add_flow(datapath, 1, match, actions)
98 | data = None
99 | if msg.buffer_id == ofproto.OFP_NO_BUFFER:
100 | data = msg.data
101 |
102 | out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
103 | in_port=in_port, actions=actions, data=data)
104 | datapath.send_msg(out)
105 |
106 |
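Note: the decode loop in _packet_in_handler can be tried offline, without a switch, by building a packet with the same ryu.lib.packet classes and feeding the raw bytes back through packet.Packet. The ARP construction below mirrors the pattern used in Chapter11_3.py later in this repository; the addresses are arbitrary placeholders.

```
from ryu.lib.packet import packet, ethernet, arp
from ryu.ofproto import ether

# Build an Ethernet + ARP reply (placeholder addresses)
e = ethernet.ethernet(dst='00:00:00:00:00:01',
                      src='00:00:00:00:00:02',
                      ethertype=ether.ETH_TYPE_ARP)
a = arp.arp(hwtype=1, proto=0x0800, hlen=6, plen=4, opcode=2,
            src_mac='00:00:00:00:00:02', src_ip='192.168.2.1',
            dst_mac='00:00:00:00:00:01', dst_ip='192.168.1.10')

p = packet.Packet()
p.add_protocol(e)
p.add_protocol(a)
p.serialize()

# Decode the raw bytes the same way the PacketIn handler does
decoded = packet.Packet(p.data)
for proto in decoded.protocols:
    print(proto.protocol_name, proto)
```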
--------------------------------------------------------------------------------
/Chapter03/Arista/eapi_2_acl.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) 2014 Arista Networks
3 | #
4 | # Permission is hereby granted, free of charge, to any person obtaining a copy
5 | # of this software and associated documentation files (the "Software"), to deal
6 | # in the Software without restriction, including without limitation the rights
7 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | # copies of the Software, and to permit persons to whom the Software is
9 | # furnished to do so, subject to the following conditions:
10 | #
11 | # The above copyright notice and this permission notice shall be included in
12 | # all copies or substantial portions of the Software.
13 | #
14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 | # THE SOFTWARE.
21 |
22 |
23 | import argparse
24 | import jsonrpclib
25 | import os
26 | import sys
27 | import subprocess
28 |
29 | import ssl
30 | ssl._create_default_https_context = ssl._create_unverified_context
31 |
32 | # EAPI script to remotely edit an access list across multiple
33 | # Arista switches using your editor of choice.
34 |
35 | # From a central server with IP connectivity to your switch, run this
36 | # script and specify an ACL name and a series of switches you are
37 | # interested in editing. This then opens your $EDITOR (e.g. vi or emacs)
38 | # with the contents of the named ACL. When you're finished, close the file
39 | # and this script will update that ACL across all of the switches you specified.
40 | # No more dealing with annoying line numbers in the CLI!
41 |
42 | def main():
43 | parser = argparse.ArgumentParser(description="Edit Arista ACLs using your local editor")
44 | parser.add_argument("acl", metavar="ACL",
45 | help="Name of the access list to modify")
46 | parser.add_argument("switches", metavar="SWITCH", nargs="+",
47 | help="Hostname or IP of the switch to query")
48 | parser.add_argument("--username", help="Name of the user to connect as",
49 | default="admin")
50 | parser.add_argument("--password", help="The user's password")
51 | parser.add_argument("--https", help="Use HTTPS instead of HTTP",
52 | action="store_const", const="https", default="http")
53 | args = parser.parse_args()
54 |
55 | aclName = args.acl
56 | tmpfile = "/tmp/AclEditor-%s" % aclName
57 | apiEndpoints = getEndpoints(args.switches, args.https,
58 | args.username, args.password)
59 | prepopulateAclFile(tmpfile, aclName, apiEndpoints)
60 | edits = getEdits(tmpfile)
61 | applyChanges(aclName, apiEndpoints, edits)
62 | print
63 | print "Done!"
64 |
65 | def getEndpoints(switchHostnames, protocol, username, password):
66 | """ Check that each server is up, and return a mapping from
67 | hostname to jsonrpclib.Server """
68 | apiEndpoints = {} # mapping from hostname to the API endpoint
69 | for switch in switchHostnames:
70 | url = "{protocol}://{user}:{pw}@{hostname}/command-api".format(
71 | protocol=protocol, user=username, pw=password, hostname=switch)
72 | server = jsonrpclib.Server(url)
73 | try:
74 | # We should at least be able to 'enable'
75 | server.runCmds(1, ["enable"])
76 | except Exception as e:
77 | print "Unable to run 'enable' on switch", e
78 | sys.exit(1)
79 | apiEndpoints[switch] = server
80 | return apiEndpoints
81 |
82 | def prepopulateAclFile(filename, aclName, apiEndpoints):
83 | """ Given a jsonrpclib.Server called 'switch', prepopulate
84 | 'filename' with the ACL contents. If the ACL does not yet exist,
85 | just print a message """
86 |
87 | # Currently assume all switches have the same config, so just use a
88 | # random one as the sample.
89 | apiEndpoint = apiEndpoints.itervalues().next()
90 | responseList = apiEndpoint.runCmds(1, ["enable",
91 | "show ip access-lists %s" % aclName])
92 | response = responseList[1] # Only care about the ACL output.
93 | if not response["aclList"]:
94 | print "No existing access list named", aclName, "- creating new ACL"
95 | else:
96 | # Prepopulate the file with the existing config
97 | print "Editing existing access list:"
98 | with open(filename, "w") as f:
99 | for rule in response["aclList"][0]["sequence"]:
100 | line = str(rule["sequenceNumber"]) + " " + rule["text"] + "\n"
101 | print " ", line,
102 | f.write(line)
103 | print
104 |
105 | def getEdits(filename):
106 | """ Opens an editor for the user to edit the ACL, and returns a
107 | list of the new ACL contents """
108 | editor = os.environ.get("EDITOR", "vi") # default editor is "vi"
109 | ret = subprocess.Popen([editor, filename]).wait()
110 | if ret != 0:
111 | print "Bad editor exit. Aborting."
112 | sys.exit(1)
113 | # Read in the file as a list of lines
114 | aclContents = open(filename, "r").readlines()
115 | print "New access list:"
116 | print " ", " ".join(aclContents)
117 | print
118 | return aclContents
119 |
120 | def applyChanges(aclName, apiEndpoints, aclRules):
121 | """ Given the switch mapping and a list of the new ACL rules, apply
122 | the ACL to each switch """
123 | cmdList = ["enable",
124 | "configure",
125 | # Not the most efficient way to clear an ACL:
126 | "no ip access-list %s" % aclName,
127 | # Now enter configuration mode for the ACL:
128 | "ip access-list %s" % aclName]
129 | cmdList = cmdList + aclRules + ["exit"]
130 |
131 | for hostname, apiEndpoint in apiEndpoints.iteritems():
132 | print "Updating access list on switch", hostname, "....",
133 | try:
134 | apiEndpoint.runCmds(1, cmdList)
135 | except jsonrpclib.ProtocolError as e:
136 | print "[ERROR]"
137 | print " ", e
138 | # jsonrpclib isn't very friendly at getting the error data as
139 | # specified by the spec. This is a shortcut for getting the
140 | # last error:
141 | errorResponse = jsonrpclib.loads(jsonrpclib.history.response)
142 | print " Details:", errorResponse["error"]["data"][-1]["errors"]
143 | else:
144 | print "[SUCCESS]"
145 |
146 | if __name__ == "__main__":
147 | main()
148 |
149 |
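Note: stripped to its essentials, the eAPI call pattern used in the script above looks like the short sketch below for a single switch. The credentials and management IP are placeholders, not values from the book.

```
from __future__ import print_function
import ssl
import jsonrpclib

ssl._create_default_https_context = ssl._create_unverified_context

# Placeholder credentials and switch address
url = 'https://admin:admin@192.168.199.158/command-api'
switch = jsonrpclib.Server(url)

# runCmds(version, [commands]) returns one result dict per command
response = switch.runCmds(1, ['show version'])
print(response[0]['modelName'], response[0]['version'])
```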
--------------------------------------------------------------------------------
/Chapter13/chapter13_switch_1.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12 | # implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | from ryu.base import app_manager
17 | from ryu.controller import ofp_event
18 | from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
19 | from ryu.controller.handler import set_ev_cls
20 | from ryu.ofproto import ofproto_v1_3
21 | from ryu.lib.packet import packet
22 | from ryu.lib.packet import ethernet
23 |
24 | # new import
25 | from ryu.controller import dpset
26 | from ryu.controller.handler import HANDSHAKE_DISPATCHER
27 | import random
28 |
29 |
30 | class SimpleSwitch13(app_manager.RyuApp):
31 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
32 |
33 | def __init__(self, *args, **kwargs):
34 | super(SimpleSwitch13, self).__init__(*args, **kwargs)
35 | self.mac_to_port = {}
36 | self.gen_id = 0
37 | self.role_string_list = ['nochange', 'equal', 'master', 'slave', 'unknown']
38 |
39 |
40 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
41 | def switch_features_handler(self, ev):
42 | datapath = ev.msg.datapath
43 | ofproto = datapath.ofproto
44 | parser = datapath.ofproto_parser
45 |
46 | # install table-miss flow entry
47 | #
48 | # We specify NO BUFFER to max_len of the output action due to
49 | # OVS bug. At this moment, if we specify a lesser number, e.g.,
50 | # 128, OVS will send Packet-In with invalid buffer_id and
51 | # truncated packet data. In that case, we cannot output packets
52 | # correctly. The bug has been fixed in OVS v2.1.0.
53 | match = parser.OFPMatch()
54 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
55 | ofproto.OFPCML_NO_BUFFER)]
56 | self.add_flow(datapath, 0, match, actions)
57 |
58 | def add_flow(self, datapath, priority, match, actions, buffer_id=None):
59 | ofproto = datapath.ofproto
60 | parser = datapath.ofproto_parser
61 |
62 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
63 | actions)]
64 | if buffer_id:
65 | mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
66 | priority=priority, match=match,
67 | instructions=inst)
68 | else:
69 | mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
70 | match=match, instructions=inst)
71 | datapath.send_msg(mod)
72 |
73 |
74 | @set_ev_cls(ofp_event.EventOFPErrorMsg,
75 | [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
76 | def on_error_msg(self, ev):
77 | msg = ev.msg
78 |         print 'receive an error message: %s' % (msg)
79 |
80 |
81 | @set_ev_cls(dpset.EventDP, MAIN_DISPATCHER)
82 | def on_dp_change(self, ev):
83 | if ev.enter:
84 | dp = ev.dp
85 | dpid = dp.id
86 | ofp = dp.ofproto
87 | ofp_parser = dp.ofproto_parser
88 |
89 | print 'dp entered, id is %s' % (dpid)
90 | self.send_role_request(dp, ofp.OFPCR_ROLE_MASTER, self.gen_id)
91 |
92 |
93 | @set_ev_cls(ofp_event.EventOFPRoleReply, MAIN_DISPATCHER)
94 | def on_role_reply(self, ev):
95 | msg = ev.msg
96 | dp = msg.datapath
97 | ofp = dp.ofproto
98 | role = msg.role
99 |
100 | # unknown role
101 | if role < 0 or role > 3:
102 | role = 4
103 | print ''
104 | print 'get a role reply: %s, generation: %d' % (self.role_string_list[role], msg.generation_id)
105 |
106 |
107 | def send_role_request(self, datapath, role, gen_id):
108 | ofp_parser = datapath.ofproto_parser
109 | print 'send a role change request'
110 | print 'role: %s, gen_id: %d' % (self.role_string_list[role], gen_id)
111 | msg = ofp_parser.OFPRoleRequest(datapath, role, gen_id)
112 | datapath.send_msg(msg)
113 |
114 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
115 | def _packet_in_handler(self, ev):
116 | # If you hit this you might want to increase
117 | # the "miss_send_length" of your switch
118 | if ev.msg.msg_len < ev.msg.total_len:
119 | self.logger.debug("packet truncated: only %s of %s bytes",
120 | ev.msg.msg_len, ev.msg.total_len)
121 | msg = ev.msg
122 | datapath = msg.datapath
123 | ofproto = datapath.ofproto
124 | parser = datapath.ofproto_parser
125 | in_port = msg.match['in_port']
126 |
127 | pkt = packet.Packet(msg.data)
128 | eth = pkt.get_protocols(ethernet.ethernet)[0]
129 |
130 | dst = eth.dst
131 | src = eth.src
132 |
133 | dpid = datapath.id
134 | self.mac_to_port.setdefault(dpid, {})
135 |
136 | self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
137 |
138 | # learn a mac address to avoid FLOOD next time.
139 | self.mac_to_port[dpid][src] = in_port
140 |
141 | if dst in self.mac_to_port[dpid]:
142 | out_port = self.mac_to_port[dpid][dst]
143 | else:
144 | out_port = ofproto.OFPP_FLOOD
145 |
146 | actions = [parser.OFPActionOutput(out_port)]
147 |
148 | # install a flow to avoid packet_in next time
149 | if out_port != ofproto.OFPP_FLOOD:
150 | match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
151 | # verify if we have a valid buffer_id, if yes avoid to send both
152 | # flow_mod & packet_out
153 | if msg.buffer_id != ofproto.OFP_NO_BUFFER:
154 | self.add_flow(datapath, 1, match, actions, msg.buffer_id)
155 | return
156 | else:
157 | self.add_flow(datapath, 1, match, actions)
158 | data = None
159 | if msg.buffer_id == ofproto.OFP_NO_BUFFER:
160 | data = msg.data
161 |
162 | out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
163 | in_port=in_port, actions=actions, data=data)
164 | datapath.send_msg(out)
165 |
--------------------------------------------------------------------------------
/Chapter13/chapter13_switch_2.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12 | # implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # OFPRoleRequest code referenced from
17 | # https://github.com/TakeshiTseng/SDN-Work/blob/master/MultiControl/ModStatusApp.py
18 |
19 | from ryu.base import app_manager
20 | from ryu.controller import ofp_event
21 | from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
22 | from ryu.controller.handler import set_ev_cls
23 | from ryu.ofproto import ofproto_v1_3
24 | from ryu.lib.packet import packet
25 | from ryu.lib.packet import ethernet
26 |
27 | # new import
28 | from ryu.controller import dpset
29 | from ryu.controller.handler import HANDSHAKE_DISPATCHER
30 | import random
31 |
32 |
33 | class SimpleSwitch13(app_manager.RyuApp):
34 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
35 |
36 | def __init__(self, *args, **kwargs):
37 | super(SimpleSwitch13, self).__init__(*args, **kwargs)
38 | self.mac_to_port = {}
39 | self.gen_id = 0
40 | self.role_string_list = ['nochange', 'equal', 'master', 'slave', 'unknown']
41 |
42 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
43 | def switch_features_handler(self, ev):
44 | datapath = ev.msg.datapath
45 | ofproto = datapath.ofproto
46 | parser = datapath.ofproto_parser
47 |
48 |
49 | # install table-miss flow entry
50 | #
51 | # We specify NO BUFFER to max_len of the output action due to
52 | # OVS bug. At this moment, if we specify a lesser number, e.g.,
53 | # 128, OVS will send Packet-In with invalid buffer_id and
54 | # truncated packet data. In that case, we cannot output packets
55 | # correctly. The bug has been fixed in OVS v2.1.0.
56 | match = parser.OFPMatch()
57 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
58 | ofproto.OFPCML_NO_BUFFER)]
59 | self.add_flow(datapath, 0, match, actions)
60 |
61 |
62 | def add_flow(self, datapath, priority, match, actions, buffer_id=None):
63 | ofproto = datapath.ofproto
64 | parser = datapath.ofproto_parser
65 |
66 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
67 | actions)]
68 | if buffer_id:
69 | mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
70 | priority=priority, match=match,
71 | instructions=inst)
72 | else:
73 | mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
74 | match=match, instructions=inst)
75 | datapath.send_msg(mod)
76 |
77 |
78 | @set_ev_cls(ofp_event.EventOFPErrorMsg,
79 | [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
80 | def on_error_msg(self, ev):
81 | msg = ev.msg
82 |         print 'receive an error message: %s' % (msg)
83 |
84 |
85 | @set_ev_cls(dpset.EventDP, MAIN_DISPATCHER)
86 | def on_dp_change(self, ev):
87 |
88 | if ev.enter:
89 | dp = ev.dp
90 | dpid = dp.id
91 | ofp = dp.ofproto
92 | ofp_parser = dp.ofproto_parser
93 |
94 | print 'dp entered, id is %s' % (dpid)
95 | self.send_role_request(dp, ofp.OFPCR_ROLE_SLAVE, self.gen_id)
96 |
97 | @set_ev_cls(ofp_event.EventOFPRoleReply, MAIN_DISPATCHER)
98 | def on_role_reply(self, ev):
99 | msg = ev.msg
100 | dp = msg.datapath
101 | ofp = dp.ofproto
102 | role = msg.role
103 |
104 | # unknown role
105 | if role < 0 or role > 3:
106 | role = 4
107 | print ''
108 | print 'get a role reply: %s, generation: %d' % (self.role_string_list[role], msg.generation_id)
109 |
110 |
111 | def send_role_request(self, datapath, role, gen_id):
112 | ofp_parser = datapath.ofproto_parser
113 | print 'send a role change request'
114 | print 'role: %s, gen_id: %d' % (self.role_string_list[role], gen_id)
115 | msg = ofp_parser.OFPRoleRequest(datapath, role, gen_id)
116 | datapath.send_msg(msg)
117 |
118 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
119 | def _packet_in_handler(self, ev):
120 | # If you hit this you might want to increase
121 | # the "miss_send_length" of your switch
122 | if ev.msg.msg_len < ev.msg.total_len:
123 | self.logger.debug("packet truncated: only %s of %s bytes",
124 | ev.msg.msg_len, ev.msg.total_len)
125 | msg = ev.msg
126 | datapath = msg.datapath
127 | ofproto = datapath.ofproto
128 | parser = datapath.ofproto_parser
129 | in_port = msg.match['in_port']
130 |
131 | pkt = packet.Packet(msg.data)
132 | eth = pkt.get_protocols(ethernet.ethernet)[0]
133 |
134 | dst = eth.dst
135 | src = eth.src
136 |
137 | dpid = datapath.id
138 | self.mac_to_port.setdefault(dpid, {})
139 |
140 | self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
141 |
142 | # learn a mac address to avoid FLOOD next time.
143 | self.mac_to_port[dpid][src] = in_port
144 |
145 | if dst in self.mac_to_port[dpid]:
146 | out_port = self.mac_to_port[dpid][dst]
147 | else:
148 | out_port = ofproto.OFPP_FLOOD
149 |
150 | actions = [parser.OFPActionOutput(out_port)]
151 |
152 | # install a flow to avoid packet_in next time
153 | if out_port != ofproto.OFPP_FLOOD:
154 | match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
155 | # verify if we have a valid buffer_id, if yes avoid to send both
156 | # flow_mod & packet_out
157 | if msg.buffer_id != ofproto.OFP_NO_BUFFER:
158 | self.add_flow(datapath, 1, match, actions, msg.buffer_id)
159 | return
160 | else:
161 | self.add_flow(datapath, 1, match, actions)
162 | data = None
163 | if msg.buffer_id == ofproto.OFP_NO_BUFFER:
164 | data = msg.data
165 |
166 | out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
167 | in_port=in_port, actions=actions, data=data)
168 | datapath.send_msg(out)
169 |
--------------------------------------------------------------------------------
/Chapter09/chapter9_8.py:
--------------------------------------------------------------------------------
1 | # This example referenced Miguel Grinberg's code on Github:
2 | # https://github.com/miguelgrinberg/oreilly-flask-apis-video/blob/master/camera/camera.py
3 | #
4 |
5 | from flask import Flask, url_for, jsonify, request,\
6 | make_response, copy_current_request_context
7 | from flask_sqlalchemy import SQLAlchemy
8 | from chapter9_pexpect_1 import show_version
9 | import uuid
10 | import functools
11 | from threading import Thread
12 |
13 | app = Flask(__name__)
14 | app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///network.db'
15 | db = SQLAlchemy(app)
16 |
17 | background_tasks = {}
18 | app.config['AUTO_DELETE_BG_TASKS'] = True
19 |
20 |
21 | class ValidationError(ValueError):
22 | pass
23 |
24 |
25 | class Device(db.Model):
26 | __tablename__ = 'devices'
27 | id = db.Column(db.Integer, primary_key=True)
28 | hostname = db.Column(db.String(64), unique=True)
29 | loopback = db.Column(db.String(120), unique=True)
30 | mgmt_ip = db.Column(db.String(120), unique=True)
31 | role = db.Column(db.String(64))
32 | vendor = db.Column(db.String(64))
33 | os = db.Column(db.String(64))
34 |
35 | def get_url(self):
36 | return url_for('get_device', id=self.id, _external=True)
37 |
38 | def export_data(self):
39 | return {
40 | 'self_url': self.get_url(),
41 | 'hostname': self.hostname,
42 | 'loopback': self.loopback,
43 | 'mgmt_ip': self.mgmt_ip,
44 | 'role': self.role,
45 | 'vendor': self.vendor,
46 | 'os': self.os
47 | }
48 |
49 | def import_data(self, data):
50 | try:
51 | self.hostname = data['hostname']
52 | self.loopback = data['loopback']
53 | self.mgmt_ip = data['mgmt_ip']
54 | self.role = data['role']
55 | self.vendor = data['vendor']
56 | self.os = data['os']
57 | except KeyError as e:
58 | raise ValidationError('Invalid device: missing ' + e.args[0])
59 | return self
60 |
61 |
62 | def background(f):
63 | """Decorator that runs the wrapped function as a background task. It is
64 | assumed that this function creates a new resource, and takes a long time
65 | to do so. The response has status code 202 Accepted and includes a Location
66 | header with the URL of a task resource. Sending a GET request to the task
67 | will continue to return 202 for as long as the task is running. When the task
68 | has finished, the response recorded by the wrapped function is returned
69 | instead. Depending on the AUTO_DELETE_BG_TASKS setting, the finished task
70 | entry is then either deleted automatically or kept until the client removes
71 | it from the system."""
72 | @functools.wraps(f)
73 | def wrapped(*args, **kwargs):
74 | # The background task needs to be decorated with Flask's
75 | # copy_current_request_context to have access to context globals.
76 | @copy_current_request_context
77 | def task():
78 | global background_tasks
79 | try:
80 | # invoke the wrapped function and record the returned
81 | # response in the background_tasks dictionary
82 | background_tasks[id] = make_response(f(*args, **kwargs))
83 | except:
84 | # the wrapped function raised an exception, return a 500
85 | # response
86 | background_tasks[id] = make_response(internal_server_error())
87 |
88 | # store the background task under a randomly generated identifier
89 | # and start it
90 | global background_tasks
91 | id = uuid.uuid4().hex
92 | background_tasks[id] = Thread(target=task)
93 | background_tasks[id].start()
94 |
95 | # return a 202 Accepted response with the location of the task status
96 | # resource
97 | return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}
98 | return wrapped
99 |
100 |
101 | @app.route('/devices/', methods=['GET'])
102 | def get_devices():
103 | return jsonify({'device': [device.get_url()
104 | for device in Device.query.all()]})
105 |
106 | @app.route('/devices/<int:id>', methods=['GET'])
107 | def get_device(id):
108 | return jsonify(Device.query.get_or_404(id).export_data())
109 |
110 |
111 | @app.route('/devices/<int:id>/version', methods=['GET'])
112 | @background
113 | def get_device_version(id):
114 | device = Device.query.get_or_404(id)
115 | hostname = device.hostname
116 | ip = device.mgmt_ip
117 | prompt = hostname+"#"
118 | result = show_version(hostname, prompt, ip, 'cisco', 'cisco')
119 | return jsonify({"version": str(result)})
120 |
121 | @app.route('/devices/<device_role>/version', methods=['GET'])
122 | @background
123 | def get_role_version(device_role):
124 | device_id_list = [device.id for device in Device.query.all() if device.role == device_role]
125 | result = {}
126 | for id in device_id_list:
127 | device = Device.query.get_or_404(id)
128 | hostname = device.hostname
129 | ip = device.mgmt_ip
130 | prompt = hostname + "#"
131 | device_result = show_version(hostname, prompt, ip, 'cisco', 'cisco')
132 | result[hostname] = str(device_result)
133 | return jsonify(result)
134 |
135 | @app.route('/devices/', methods=['POST'])
136 | def new_device():
137 | device = Device()
138 | device.import_data(request.json)
139 | db.session.add(device)
140 | db.session.commit()
141 | return jsonify({}), 201, {'Location': device.get_url()}
142 |
143 | @app.route('/devices/<int:id>', methods=['PUT'])
144 | def edit_device(id):
145 | device = Device.query.get_or_404(id)
146 | device.import_data(request.json)
147 | db.session.add(device)
148 | db.session.commit()
149 | return jsonify({})
150 |
151 |
152 | @app.route('/status/<id>', methods=['GET'])
153 | def get_task_status(id):
154 | """Query the status of an asynchronous task."""
155 | # obtain the task and validate it
156 | global background_tasks
157 | rv = background_tasks.get(id)
158 | if rv is None:
159 | return not_found(None)
160 |
161 | # if the task object is a Thread object that means that the task is still
162 | # running. In this case return the 202 status message again.
163 | if isinstance(rv, Thread):
164 | return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}
165 |
166 | # If the task object is not a Thread then it is assumed to be the response
167 | # of the finished task, so that is the response that is returned.
168 | # If the application is configured to auto-delete task status resources once
169 | # the task is done then the deletion happens now, if not the client is
170 | # expected to send a delete request.
171 | if app.config['AUTO_DELETE_BG_TASKS']:
172 | del background_tasks[id]
173 | return rv
174 |
175 |
176 |
177 | if __name__ == '__main__':
178 | db.create_all()
179 | app.run(host='0.0.0.0', debug=True)
180 |
181 |
182 |
183 |
184 |
185 |
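Note: the client side of the @background pattern described in the decorator's docstring looks roughly like the sketch below: the first call returns 202 Accepted with a Location header for the /status/<id> resource, which is polled until the recorded response is ready. The base URL is an assumption (Flask's default port 5000).

```
import time
import requests
try:
    from urlparse import urljoin          # Python 2
except ImportError:
    from urllib.parse import urljoin      # Python 3

base = 'http://127.0.0.1:5000'

# get_device_version() is wrapped by @background, so the first reply is 202
r = requests.get(base + '/devices/1/version')
task_url = urljoin(base, r.headers['Location'])   # points at get_task_status()
print(r.status_code, task_url)

# Poll the task resource until something other than 202 comes back
while True:
    r = requests.get(task_url)
    if r.status_code != 202:
        break
    time.sleep(1)
print(r.status_code, r.json())   # the recorded show_version() result
```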
--------------------------------------------------------------------------------
/Chapter11/Chapter11_3.py:
--------------------------------------------------------------------------------
1 | from ryu.base import app_manager
2 | from ryu.controller import ofp_event
3 | from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
4 | from ryu.controller.handler import set_ev_cls
5 | from ryu.ofproto import ofproto_v1_3
6 | from ryu.lib.packet import packet
7 | from ryu.lib.packet import ethernet
8 | from ryu.lib.packet import ether_types
9 |
10 | #new import
11 | from ryu.ofproto import ether
12 | from ryu.lib.packet import ipv4, arp
13 |
14 |
15 | class MySimpleStaticRouter(app_manager.RyuApp):
16 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
17 |
18 | def __init__(self, *args, **kwargs):
19 | super(MySimpleStaticRouter, self).__init__(*args, **kwargs)
20 | self.s1_gateway_mac = '00:00:00:00:00:02' # s1 gateway is spoofing h2
21 | self.s2_gateway_mac = '00:00:00:00:00:01' # s2 gateway is spoofing h1
22 |
23 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
24 | def switch_features_handler(self, ev):
25 | datapath = ev.msg.datapath
26 | ofproto = datapath.ofproto
27 | parser = datapath.ofproto_parser
28 |
29 | # install table-miss flow entry
30 | match = parser.OFPMatch()
31 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
32 | ofproto.OFPCML_NO_BUFFER)]
33 | self.add_flow(datapath, 0, match, actions)
34 |
35 | # push out static flows, note this is priority 1
36 | if datapath.id == 1:
37 | # flow from h1 to h2
38 | match = parser.OFPMatch(in_port=1,
39 | eth_type=ether.ETH_TYPE_IP,
40 | ipv4_src=('192.168.1.0', '255.255.255.0'),
41 | ipv4_dst=('192.168.2.0', '255.255.255.0'))
42 | out_port = 2
43 | actions = [parser.OFPActionOutput(out_port)]
44 | self.add_flow(datapath, 1, match, actions)
45 |
46 |
47 | # flow from h2 to h1
48 | match = parser.OFPMatch(in_port=2,
49 | eth_type=ether.ETH_TYPE_IP,
50 | ipv4_src=('192.168.2.0', '255.255.255.0'),
51 | ipv4_dst=('192.168.1.0', '255.255.255.0'))
52 | out_port = 1
53 | actions = [parser.OFPActionOutput(out_port)]
54 | self.add_flow(datapath, 1, match, actions)
55 |
56 | if datapath.id == 2:
57 | # flow from h1 to h2
58 | match = parser.OFPMatch(in_port=2,
59 | eth_type=ether.ETH_TYPE_IP,
60 | ipv4_src=('192.168.1.0', '255.255.255.0'),
61 | ipv4_dst=('192.168.2.0', '255.255.255.0'))
62 | out_port = 1
63 | # Can rewrite dst mac to h2 or spoof like we have done
64 | # parser.OFPActionSetField(eth_dst="00:00:00:00:00:02")]
65 | actions = [parser.OFPActionOutput(out_port)]
66 | self.add_flow(datapath, 1, match, actions)
67 |
68 |             # flow from h2 to h1
69 | match = parser.OFPMatch(in_port=1,
70 | eth_type=ether.ETH_TYPE_IP,
71 | ipv4_src=('192.168.2.0', '255.255.255.0'),
72 | ipv4_dst=('192.168.1.0', '255.255.255.0'))
73 | out_port = 2
74 | actions = [parser.OFPActionOutput(out_port)]
75 | self.add_flow(datapath, 1, match, actions)
76 |
77 |
78 | def add_flow(self, datapath, priority, match, actions, buffer_id=None):
79 | ofproto = datapath.ofproto
80 | parser = datapath.ofproto_parser
81 |
82 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
83 | actions)]
84 |
85 | # Note the addition of idle_timeout and hard_timeout
86 | if buffer_id:
87 | mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
88 | priority=priority, match=match,
89 | instructions=inst, idle_timeout=6000,
90 | hard_timeout=6000)
91 | else:
92 | mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
93 | match=match, instructions=inst,
94 | idle_timeout=6000, hard_timeout=6000)
95 | datapath.send_msg(mod)
96 |
97 |
98 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
99 | def _packet_in_handler(self, ev):
100 | msg = ev.msg
101 | datapath = msg.datapath
102 | ofproto = datapath.ofproto
103 | parser = datapath.ofproto_parser
104 | in_port = msg.match['in_port']
105 |
106 |
107 | pkt = packet.Packet(msg.data)
108 | eth = pkt.get_protocols(ethernet.ethernet)[0]
109 |
110 |         # Answering ARP requests for packets destined for the gateways
111 | if eth.ethertype == ether_types.ETH_TYPE_ARP:
112 | arp_packet = pkt.get_protocols(arp.arp)[0]
113 | ethernet_src = eth.src
114 |
115 | # answering arp for s2 gateway 192.168.2.1
116 | if arp_packet.dst_ip == '192.168.2.1' and datapath.id == 2:
117 | print('Received ARP for 192.168.2.1')
118 |
119 | # building packet
120 | e = ethernet.ethernet(dst=eth.src, src=self.s2_gateway_mac, ethertype=ether.ETH_TYPE_ARP)
121 | a = arp.arp(hwtype=1, proto=0x0800, hlen=6, plen=4, opcode=2,
122 | src_mac=self.s2_gateway_mac, src_ip='192.168.2.1',
123 | dst_mac=ethernet_src, dst_ip=arp_packet.src_ip)
124 |
125 | p = packet.Packet()
126 | p.add_protocol(e)
127 | p.add_protocol(a)
128 | p.serialize()
129 |
130 | # sending arp response for s2 gateway
131 | outPort = in_port
132 | actions = [datapath.ofproto_parser.OFPActionOutput(outPort, 0)]
133 | out = datapath.ofproto_parser.OFPPacketOut(
134 | datapath=datapath,
135 | buffer_id=0xffffffff,
136 | in_port=datapath.ofproto.OFPP_CONTROLLER,
137 | actions=actions,
138 | data=p.data)
139 | datapath.send_msg(out)
140 |
141 |             # answering arp for s1 gateway 192.168.1.1
142 | elif arp_packet.dst_ip == '192.168.1.1' and datapath.id == 1:
143 | print('Received ARP for 192.168.1.1')
144 |
145 | # building packet
146 | e = ethernet.ethernet(dst=eth.src, src=self.s1_gateway_mac, ethertype=ether.ETH_TYPE_ARP)
147 | a = arp.arp(hwtype=1, proto=0x0800, hlen=6, plen=4, opcode=2,
148 | src_mac=self.s1_gateway_mac, src_ip='192.168.1.1',
149 | dst_mac=ethernet_src, dst_ip=arp_packet.src_ip)
150 |
151 | p = packet.Packet()
152 | p.add_protocol(e)
153 | p.add_protocol(a)
154 | p.serialize()
155 |
156 | # sending arp response for s1 gateway
157 | outPort = in_port
158 | actions = [datapath.ofproto_parser.OFPActionOutput(outPort, 0)]
159 | out = datapath.ofproto_parser.OFPPacketOut(
160 | datapath=datapath,
161 | buffer_id=0xffffffff,
162 | in_port=datapath.ofproto.OFPP_CONTROLLER,
163 | actions=actions,
164 | data=p.data)
165 | datapath.send_msg(out)
166 |
167 | # verbose iteration of packets
168 | try:
169 | for p in pkt.protocols:
170 | print(p.protocol_name, p)
171 | print("datapath: {} in_port: {}".format(datapath.id, in_port))
172 | except:
173 | pass
174 |
175 |
176 |
--------------------------------------------------------------------------------
/Chapter09/chapter9_9.py:
--------------------------------------------------------------------------------
1 | # This example referenced Miguel Grinberg's code on Github:
2 | # https://github.com/miguelgrinberg/oreilly-flask-apis-video
3 | #
4 |
5 | from flask import Flask, url_for, jsonify, request,\
6 | make_response, copy_current_request_context, g
7 | from flask_sqlalchemy import SQLAlchemy
8 | from chapter9_pexpect_1 import show_version
9 | import uuid
10 | import functools
11 | from threading import Thread
12 | from werkzeug.security import generate_password_hash, check_password_hash
13 | from flask_httpauth import HTTPBasicAuth
14 |
15 | app = Flask(__name__)
16 | app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///network.db'
17 | db = SQLAlchemy(app)
18 | auth = HTTPBasicAuth()
19 |
20 | background_tasks = {}
21 | app.config['AUTO_DELETE_BG_TASKS'] = True
22 |
23 |
24 | class ValidationError(ValueError):
25 | pass
26 |
27 | # The two password helper methods use Werkzeug, which ships with Flask
28 | class User(db.Model):
29 | __tablename__ = 'users'
30 | id = db.Column(db.Integer, primary_key=True)
31 | username = db.Column(db.String(64), index=True)
32 | password_hash = db.Column(db.String(128))
33 |
34 | def set_password(self, password):
35 | self.password_hash = generate_password_hash(password)
36 |
37 | def verify_password(self, password):
38 | return check_password_hash(self.password_hash, password)
39 |
40 |
41 | class Device(db.Model):
42 | __tablename__ = 'devices'
43 | id = db.Column(db.Integer, primary_key=True)
44 | hostname = db.Column(db.String(64), unique=True)
45 | loopback = db.Column(db.String(120), unique=True)
46 | mgmt_ip = db.Column(db.String(120), unique=True)
47 | role = db.Column(db.String(64))
48 | vendor = db.Column(db.String(64))
49 | os = db.Column(db.String(64))
50 |
51 | def get_url(self):
52 | return url_for('get_device', id=self.id, _external=True)
53 |
54 | def export_data(self):
55 | return {
56 | 'self_url': self.get_url(),
57 | 'hostname': self.hostname,
58 | 'loopback': self.loopback,
59 | 'mgmt_ip': self.mgmt_ip,
60 | 'role': self.role,
61 | 'vendor': self.vendor,
62 | 'os': self.os
63 | }
64 |
65 | def import_data(self, data):
66 | try:
67 | self.hostname = data['hostname']
68 | self.loopback = data['loopback']
69 | self.mgmt_ip = data['mgmt_ip']
70 | self.role = data['role']
71 | self.vendor = data['vendor']
72 | self.os = data['os']
73 | except KeyError as e:
74 | raise ValidationError('Invalid device: missing ' + e.args[0])
75 | return self
76 |
77 |
78 | def background(f):
79 | """Decorator that runs the wrapped function as a background task. It is
80 | assumed that this function creates a new resource, and takes a long time
81 | to do so. The response has status code 202 Accepted and includes a Location
82 | header with the URL of a task resource. Sending a GET request to the task
83 | will continue to return 202 for as long as the task is running. When the task
84 | has finished, the response recorded by the wrapped function is returned
85 | instead. Depending on the AUTO_DELETE_BG_TASKS setting, the finished task
86 | entry is then either deleted automatically or kept until the client removes
87 | it from the system."""
88 | @functools.wraps(f)
89 | def wrapped(*args, **kwargs):
90 | # The background task needs to be decorated with Flask's
91 | # copy_current_request_context to have access to context globals.
92 | @copy_current_request_context
93 | def task():
94 | global background_tasks
95 | try:
96 | # invoke the wrapped function and record the returned
97 | # response in the background_tasks dictionary
98 | background_tasks[id] = make_response(f(*args, **kwargs))
99 | except:
100 | # the wrapped function raised an exception, return a 500
101 | # response
102 | background_tasks[id] = make_response(internal_server_error())
103 |
104 | # store the background task under a randomly generated identifier
105 | # and start it
106 | global background_tasks
107 | id = uuid.uuid4().hex
108 | background_tasks[id] = Thread(target=task)
109 | background_tasks[id].start()
110 |
111 | # return a 202 Accepted response with the location of the task status
112 | # resource
113 | return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}
114 | return wrapped
115 |
116 | # g is Flask's per-request context global
117 | @auth.verify_password
118 | def verify_password(username, password):
119 | g.user = User.query.filter_by(username=username).first()
120 | if g.user is None:
121 | return False
122 | return g.user.verify_password(password)
123 |
124 | @app.before_request
125 | @auth.login_required
126 | def before_request():
127 | pass
128 |
129 | # from the Flask-HTTPAuth extension
130 | @auth.error_handler
131 | def unauthorized():
132 |     response = jsonify({'status': 401, 'error': 'unauthorized',
133 | 'message': 'please authenticate'})
134 | response.status_code = 401
135 | return response
136 |
137 |
138 | @app.route('/devices/', methods=['GET'])
139 | def get_devices():
140 | return jsonify({'device': [device.get_url()
141 | for device in Device.query.all()]})
142 |
143 | @app.route('/devices/<int:id>', methods=['GET'])
144 | def get_device(id):
145 | return jsonify(Device.query.get_or_404(id).export_data())
146 |
147 |
148 | @app.route('/devices/<int:id>/version', methods=['GET'])
149 | @background
150 | def get_device_version(id):
151 | device = Device.query.get_or_404(id)
152 | hostname = device.hostname
153 | ip = device.mgmt_ip
154 | prompt = hostname+"#"
155 | result = show_version(hostname, prompt, ip, 'cisco', 'cisco')
156 | return jsonify({"version": str(result)})
157 |
158 | @app.route('/devices/<device_role>/version', methods=['GET'])
159 | @background
160 | def get_role_version(device_role):
161 | device_id_list = [device.id for device in Device.query.all() if device.role == device_role]
162 | result = {}
163 | for id in device_id_list:
164 | device = Device.query.get_or_404(id)
165 | hostname = device.hostname
166 | ip = device.mgmt_ip
167 | prompt = hostname + "#"
168 | device_result = show_version(hostname, prompt, ip, 'cisco', 'cisco')
169 | result[hostname] = str(device_result)
170 | return jsonify(result)
171 |
172 | @app.route('/devices/', methods=['POST'])
173 | def new_device():
174 | device = Device()
175 | device.import_data(request.json)
176 | db.session.add(device)
177 | db.session.commit()
178 | return jsonify({}), 201, {'Location': device.get_url()}
179 |
180 | @app.route('/devices/<int:id>', methods=['PUT'])
181 | def edit_device(id):
182 | device = Device.query.get_or_404(id)
183 | device.import_data(request.json)
184 | db.session.add(device)
185 | db.session.commit()
186 | return jsonify({})
187 |
188 |
189 | @app.route('/status/<id>', methods=['GET'])
190 | def get_task_status(id):
191 | """Query the status of an asynchronous task."""
192 | # obtain the task and validate it
193 | global background_tasks
194 | rv = background_tasks.get(id)
195 | if rv is None:
196 | return not_found(None)
197 |
198 | # if the task object is a Thread object that means that the task is still
199 | # running. In this case return the 202 status message again.
200 | if isinstance(rv, Thread):
201 | return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}
202 |
203 | # If the task object is not a Thread then it is assumed to be the response
204 | # of the finished task, so that is the response that is returned.
205 | # If the application is configured to auto-delete task status resources once
206 | # the task is done then the deletion happens now, if not the client is
207 | # expected to send a delete request.
208 | if app.config['AUTO_DELETE_BG_TASKS']:
209 | del background_tasks[id]
210 | return rv
211 |
212 |
213 |
214 | if __name__ == '__main__':
215 | db.create_all()
216 | app.run(host='0.0.0.0', debug=True)
217 |
218 |
219 |
220 |
221 |
222 |
--------------------------------------------------------------------------------
/Chapter07/results.txt:
--------------------------------------------------------------------------------
1 | {'Gig0-0_In_Octet': '3990616', 'Gig0-0_Out_uPackets': '60077', 'Gig0-0_In_uPackets': '42229', 'Gig0-0_Out_Octet': '5228254', 'Time': '2017-03-06T02:34:02.146245', 'hostname': 'iosv-1.virl.info'}
2 | {'Gig0-0_Out_uPackets': '60095', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_Octet': '5229721', 'Time': '2017-03-06T02:35:02.072340', 'Gig0-0_In_Octet': '3991754', 'Gig0-0_In_uPackets': '42242'}
3 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_Octet': '5231484', 'Gig0-0_In_Octet': '3993129', 'Time': '2017-03-06T02:36:02.753134', 'Gig0-0_In_uPackets': '42257', 'Gig0-0_Out_uPackets': '60116'}
4 | {'Gig0-0_In_Octet': '3994504', 'Time': '2017-03-06T02:37:02.146894', 'Gig0-0_In_uPackets': '42272', 'Gig0-0_Out_uPackets': '60136', 'Gig0-0_Out_Octet': '5233187', 'hostname': 'iosv-1.virl.info'}
5 | {'Gig0-0_In_uPackets': '42284', 'Time': '2017-03-06T02:38:01.915432', 'Gig0-0_In_Octet': '3995585', 'Gig0-0_Out_Octet': '5234656', 'Gig0-0_Out_uPackets': '60154', 'hostname': 'iosv-1.virl.info'}
6 | {'Gig0-0_Out_Octet': '5236419', 'Time': '2017-03-06T02:39:01.646927', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_uPackets': '60175', 'Gig0-0_In_Octet': '3996960', 'Gig0-0_In_uPackets': '42299'}
7 | {'Gig0-0_In_uPackets': '42311', 'hostname': 'iosv-1.virl.info', 'Time': '2017-03-06T02:40:02.456579', 'Gig0-0_Out_uPackets': '60193', 'Gig0-0_In_Octet': '3998041', 'Gig0-0_Out_Octet': '5237888'}
8 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '3999414', 'Gig0-0_In_uPackets': '42326', 'Gig0-0_Out_uPackets': '60215', 'Time': '2017-03-06T02:41:02.294267', 'Gig0-0_Out_Octet': '5239725'}
9 | {'Gig0-0_Out_Octet': '5241486', 'Gig0-0_Out_uPackets': '60236', 'Gig0-0_In_uPackets': '42341', 'Time': '2017-03-06T02:42:01.966146', 'Gig0-0_In_Octet': '4000786', 'hostname': 'iosv-1.virl.info'}
10 | {'Time': '2017-03-06T02:43:01.731416', 'Gig0-0_Out_uPackets': '60254', 'Gig0-0_Out_Octet': '5242952', 'Gig0-0_In_Octet': '4001865', 'Gig0-0_In_uPackets': '42353', 'hostname': 'iosv-1.virl.info'}
11 | {'Gig0-0_Out_Octet': '5244653', 'Gig0-0_Out_uPackets': '60274', 'Gig0-0_In_uPackets': '42368', 'hostname': 'iosv-1.virl.info', 'Time': '2017-03-06T02:44:02.521641', 'Gig0-0_In_Octet': '4003237'}
12 | {'Gig0-0_Out_Octet': '5246413', 'Gig0-0_Out_uPackets': '60295', 'Time': '2017-03-06T02:45:02.228500', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4004610', 'Gig0-0_In_uPackets': '42383'}
13 | {'Gig0-0_Out_uPackets': '60313', 'Gig0-0_Out_Octet': '5247880', 'Gig0-0_In_uPackets': '42395', 'hostname': 'iosv-1.virl.info', 'Time': '2017-03-06T02:46:01.960311', 'Gig0-0_In_Octet': '4005688'}
14 | {'Gig0-0_Out_uPackets': '60334', 'Gig0-0_Out_Octet': '5249643', 'Gig0-0_In_uPackets': '42410', 'Gig0-0_In_Octet': '4007063', 'hostname': 'iosv-1.virl.info', 'Time': '2017-03-06T02:47:01.861698'}
15 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42422', 'Gig0-0_Out_Octet': '5251109', 'Gig0-0_In_Octet': '4008142', 'Time': '2017-03-06T02:48:01.756321', 'Gig0-0_Out_uPackets': '60352'}
16 | {'Gig0-0_Out_Octet': '5252807', 'Time': '2017-03-06T02:49:01.902937', 'Gig0-0_In_Octet': '4009512', 'Gig0-0_Out_uPackets': '60372', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42437'}
17 | {'Gig0-0_In_Octet': '4010588', 'Time': '2017-03-06T02:50:01.770566', 'Gig0-0_In_uPackets': '42449', 'Gig0-0_Out_uPackets': '60390', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_Octet': '5254271'}
18 | {'Gig0-0_In_uPackets': '42464', 'Gig0-0_Out_Octet': '5256106', 'Time': '2017-03-06T02:51:02.005671', 'Gig0-0_In_Octet': '4011958', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_uPackets': '60412'}
19 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4013328', 'Time': '2017-03-06T02:52:01.809311', 'Gig0-0_Out_uPackets': '60432', 'Gig0-0_In_uPackets': '42479', 'Gig0-0_Out_Octet': '5257804'}
20 | {'Gig0-0_Out_Octet': '5259271', 'Gig0-0_In_uPackets': '42491', 'Time': '2017-03-06T02:53:01.530597', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4014406', 'Gig0-0_Out_uPackets': '60450'}
21 | {'Gig0-0_Out_uPackets': '60471', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42506', 'Time': '2017-03-06T02:54:02.209868', 'Gig0-0_In_Octet': '4015781', 'Gig0-0_Out_Octet': '5261034'}
22 | {'Gig0-0_Out_uPackets': '60489', 'Gig0-0_In_uPackets': '42518', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_Octet': '5262503', 'Time': '2017-03-06T02:55:02.205098', 'Gig0-0_In_Octet': '4016862'}
23 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42533', 'Gig0-0_Out_Octet': '5264263', 'Gig0-0_Out_uPackets': '60510', 'Time': '2017-03-06T02:56:02.246366', 'Gig0-0_In_Octet': '4018235'}
24 | {'Gig0-0_Out_Octet': '5266024', 'Time': '2017-03-06T02:57:02.057769', 'Gig0-0_Out_uPackets': '60531', 'Gig0-0_In_Octet': '4019607', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42548'}
25 | {'Gig0-0_In_uPackets': '42560', 'Gig0-0_Out_uPackets': '60548', 'Time': '2017-03-06T02:58:01.759841', 'Gig0-0_Out_Octet': '5267430', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4020686'}
26 | {'Gig0-0_Out_uPackets': '60569', 'Time': '2017-03-06T02:59:01.500577', 'Gig0-0_Out_Octet': '5269191', 'Gig0-0_In_uPackets': '42575', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4022058'}
27 | {'Gig0-0_Out_uPackets': '60588', 'Gig0-0_In_uPackets': '42587', 'Gig0-0_Out_Octet': '5270737', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4023139', 'Time': '2017-03-06T03:00:02.368872'}
28 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4024512', 'Gig0-0_In_uPackets': '42602', 'Time': '2017-03-06T03:01:02.150037', 'Gig0-0_Out_Octet': '5272497', 'Gig0-0_Out_uPackets': '60609'}
29 | {'Gig0-0_Out_Octet': '5274258', 'Gig0-0_Out_uPackets': '60630', 'Time': '2017-03-06T03:02:01.869521', 'Gig0-0_In_uPackets': '42617', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4025884'}
30 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4026965', 'Gig0-0_Out_Octet': '5275727', 'Gig0-0_In_uPackets': '42629', 'Time': '2017-03-06T03:03:01.570950', 'Gig0-0_Out_uPackets': '60648'}
31 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42644', 'Time': '2017-03-06T03:04:02.255872', 'Gig0-0_Out_uPackets': '60669', 'Gig0-0_Out_Octet': '5277487', 'Gig0-0_In_Octet': '4028338'}
32 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_uPackets': '60690', 'Gig0-0_Out_Octet': '5279248', 'Time': '2017-03-06T03:05:01.962176', 'Gig0-0_In_uPackets': '42659', 'Gig0-0_In_Octet': '4029710'}
33 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4030791', 'Gig0-0_Out_Octet': '5280717', 'Time': '2017-03-06T03:06:01.661128', 'Gig0-0_In_uPackets': '42671', 'Gig0-0_Out_uPackets': '60708'}
34 | {'Gig0-0_In_Octet': '4032164', 'Time': '2017-03-06T03:07:02.336401', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_uPackets': '60728', 'Gig0-0_Out_Octet': '5282417', 'Gig0-0_In_uPackets': '42686'}
35 | {'Gig0-0_Out_Octet': '5283884', 'Gig0-0_In_Octet': '4033242', 'Gig0-0_In_uPackets': '42698', 'Time': '2017-03-06T03:08:02.144784', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_uPackets': '60746'}
36 | {'Gig0-0_Out_Octet': '5285721', 'Time': '2017-03-06T03:09:02.042190', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4034615', 'Gig0-0_In_uPackets': '42713', 'Gig0-0_Out_uPackets': '60768'}
37 | {'Time': '2017-03-06T03:10:01.723442', 'Gig0-0_Out_uPackets': '60789', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_Octet': '5287479', 'Gig0-0_In_Octet': '4035985', 'Gig0-0_In_uPackets': '42728'}
38 | {'Gig0-0_In_uPackets': '42740', 'Time': '2017-03-06T03:11:02.393732', 'Gig0-0_Out_uPackets': '60807', 'Gig0-0_In_Octet': '4037063', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_Octet': '5288946'}
39 | {'Gig0-0_Out_uPackets': '60828', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42755', 'Gig0-0_In_Octet': '4038438', 'Time': '2017-03-06T03:12:02.123784', 'Gig0-0_Out_Octet': '5290709'}
40 | {'Gig0-0_In_Octet': '4039813', 'Gig0-0_Out_uPackets': '60849', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_uPackets': '42770', 'Time': '2017-03-06T03:13:01.795363', 'Gig0-0_Out_Octet': '5292472'}
41 | {'Gig0-0_Out_Octet': '5293938', 'Gig0-0_Out_uPackets': '60867', 'Gig0-0_In_uPackets': '42782', 'Gig0-0_In_Octet': '4040892', 'Time': '2017-03-06T03:14:01.476548', 'hostname': 'iosv-1.virl.info'}
42 | {'Time': '2017-03-06T03:15:02.254936', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_uPackets': '60888', 'Gig0-0_Out_Octet': '5295699', 'Gig0-0_In_uPackets': '42797', 'Gig0-0_In_Octet': '4042264'}
43 | {'Gig0-0_In_Octet': '4043343', 'Gig0-0_In_uPackets': '42809', 'Gig0-0_Out_uPackets': '60905', 'Time': '2017-03-06T03:16:02.177917', 'Gig0-0_Out_Octet': '5297105', 'hostname': 'iosv-1.virl.info'}
44 | {'Gig0-0_Out_uPackets': '60926', 'hostname': 'iosv-1.virl.info', 'Time': '2017-03-06T03:17:01.881498', 'Gig0-0_In_uPackets': '42824', 'Gig0-0_Out_Octet': '5298863', 'Gig0-0_In_Octet': '4044713'}
45 | {'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_Octet': '5300621', 'Gig0-0_In_Octet': '4046083', 'Time': '2017-03-06T03:18:01.558740', 'Gig0-0_Out_uPackets': '60947', 'Gig0-0_In_uPackets': '42839'}
46 | {'Gig0-0_Out_uPackets': '60965', 'Gig0-0_Out_Octet': '5302088', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4047161', 'Time': '2017-03-06T03:19:02.253796', 'Gig0-0_In_uPackets': '42851'}
47 | {'Gig0-0_Out_uPackets': '60985', 'Time': '2017-03-06T03:20:01.966435', 'Gig0-0_Out_Octet': '5303791', 'Gig0-0_In_uPackets': '42866', 'Gig0-0_In_Octet': '4048536', 'hostname': 'iosv-1.virl.info'}
48 | {'Gig0-0_In_uPackets': '42878', 'Time': '2017-03-06T03:21:01.639857', 'Gig0-0_Out_uPackets': '61004', 'Gig0-0_Out_Octet': '5305337', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4049617'}
49 | {'Gig0-0_Out_Octet': '5307097', 'Gig0-0_Out_uPackets': '61025', 'Time': '2017-03-06T03:22:02.552536', 'hostname': 'iosv-1.virl.info', 'Gig0-0_In_Octet': '4050990', 'Gig0-0_In_uPackets': '42893'}
50 | {'Gig0-0_Out_Octet': '5308855', 'hostname': 'iosv-1.virl.info', 'Gig0-0_Out_uPackets': '61046', 'Gig0-0_In_uPackets': '42909', 'Time': '2017-03-06T03:23:02.386113', 'Gig0-0_In_Octet': '4052420'}
51 |
--------------------------------------------------------------------------------
/Chapter11/Chapter11_4.py:
--------------------------------------------------------------------------------
1 | # REST API
2 | #
3 | # Retrieve the switch stats
4 | #
5 | # get the list of all switches
6 | # GET /network/switches
7 | #
8 | # get the description of the switch
9 | # GET /network/desc/<dpid>
10 | #
11 | # get flow stats of the switch
12 | # GET /network/flow/<dpid>
13 | #
14 | # add a flow entry
15 | # POST /network/flowentry/add
16 | #
17 | # delete all matching flow entries
18 | # POST /network/flowentry/delete
19 | #
20 |
21 | from ryu.base import app_manager
22 | from ryu.controller import ofp_event
23 | from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
24 | from ryu.controller.handler import set_ev_cls
25 | from ryu.ofproto import ofproto_v1_3
26 | from ryu.lib.packet import packet
27 | from ryu.lib.packet import ethernet
28 | from ryu.lib.packet import ether_types
29 | from ryu.ofproto import ether
30 | from ryu.lib.packet import ipv4, arp
31 |
32 | # new imports
33 | from ryu.app.ofctl_rest import *
34 |
35 |
36 | class MySimpleRestRouter(app_manager.RyuApp):
37 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
38 |
39 | # new
40 | _CONTEXTS = {
41 | 'dpset': dpset.DPSet,
42 | 'wsgi': WSGIApplication
43 | }
44 |
45 | def __init__(self, *args, **kwargs):
46 | super(MySimpleRestRouter, self).__init__(*args, **kwargs)
47 | self.s1_gateway_mac = '00:00:00:00:00:02' # s1 gateway is spoofing h2
48 | self.s2_gateway_mac = '00:00:00:00:00:01' # s2 gateway is spoofing h1
49 |
50 | # new
51 | self.dpset = kwargs['dpset']
52 | wsgi = kwargs['wsgi']
53 | self.waiters = {}
54 | self.data = {}
55 | self.data['dpset'] = self.dpset
56 | self.data['waiters'] = self.waiters
57 | mapper = wsgi.mapper
58 |
59 | wsgi.registory['StatsController'] = self.data
60 | path = '/network'
61 |
62 | uri = path + '/switches'
63 | mapper.connect('stats', uri,
64 | controller=StatsController, action='get_dpids',
65 | conditions=dict(method=['GET']))
66 |
67 | uri = path + '/desc/{dpid}'
68 | mapper.connect('stats', uri,
69 | controller=StatsController, action='get_desc_stats',
70 | conditions=dict(method=['GET']))
71 |
72 | uri = path + '/flow/{dpid}'
73 | mapper.connect('stats', uri,
74 | controller=StatsController, action='get_flow_stats',
75 | conditions=dict(method=['GET']))
76 |
77 | uri = path + '/flowentry/{cmd}'
78 | mapper.connect('stats', uri,
79 | controller=StatsController, action='mod_flow_entry',
80 | conditions=dict(method=['POST']))
81 |
82 |
83 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
84 | def switch_features_handler(self, ev):
85 | datapath = ev.msg.datapath
86 | ofproto = datapath.ofproto
87 | parser = datapath.ofproto_parser
88 |
89 | # install table-miss flow entry
90 | match = parser.OFPMatch()
91 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
92 | ofproto.OFPCML_NO_BUFFER)]
93 | self.add_flow(datapath, 0, match, actions)
94 |
95 |
96 | def add_flow(self, datapath, priority, match, actions, buffer_id=None):
97 | ofproto = datapath.ofproto
98 | parser = datapath.ofproto_parser
99 |
100 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
101 | actions)]
102 |
103 | # build the flow mod, attaching the buffer_id when one is supplied
104 | if buffer_id:
105 | mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
106 | priority=priority, match=match,
107 | instructions=inst)
108 | else:
109 | mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
110 | match=match, instructions=inst)
111 | datapath.send_msg(mod)
112 |
113 |
114 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
115 | def _packet_in_handler(self, ev):
116 | msg = ev.msg
117 | datapath = msg.datapath
118 | ofproto = datapath.ofproto
119 | parser = datapath.ofproto_parser
120 | in_port = msg.match['in_port']
121 |
122 |
123 | pkt = packet.Packet(msg.data)
124 | eth = pkt.get_protocols(ethernet.ethernet)[0]
125 |
126 | # answering ARP requests for packets destined for the gateways
127 | if eth.ethertype == ether_types.ETH_TYPE_ARP:
128 | arp_packet = pkt.get_protocols(arp.arp)[0]
129 | ethernet_src = eth.src
130 |
131 | # answering arp for s2 gateway 192.168.2.1
132 | if arp_packet.dst_ip == '192.168.2.1' and datapath.id == 2:
133 | print('Received ARP for 192.168.2.1')
134 |
135 | # building packet
136 | e = ethernet.ethernet(dst=eth.src, src=self.s2_gateway_mac, ethertype=ether.ETH_TYPE_ARP)
137 | a = arp.arp(hwtype=1, proto=0x0800, hlen=6, plen=4, opcode=2,
138 | src_mac=self.s2_gateway_mac, src_ip='192.168.2.1',
139 | dst_mac=ethernet_src, dst_ip=arp_packet.src_ip)
140 |
141 | p = packet.Packet()
142 | p.add_protocol(e)
143 | p.add_protocol(a)
144 | p.serialize()
145 |
146 | # sending arp response for s2 gateway
147 | outPort = in_port
148 | actions = [datapath.ofproto_parser.OFPActionOutput(outPort, 0)]
149 | out = datapath.ofproto_parser.OFPPacketOut(
150 | datapath=datapath,
151 | buffer_id=0xffffffff,
152 | in_port=datapath.ofproto.OFPP_CONTROLLER,
153 | actions=actions,
154 | data=p.data)
155 | datapath.send_msg(out)
156 |
157 | # answering arp for s1 gateway 192.168.1.1
158 | elif arp_packet.dst_ip == '192.168.1.1' and datapath.id == 1:
159 | print('Received ARP for 192.168.1.1')
160 |
161 | # building packet
162 | e = ethernet.ethernet(dst=eth.src, src=self.s1_gateway_mac, ethertype=ether.ETH_TYPE_ARP)
163 | a = arp.arp(hwtype=1, proto=0x0800, hlen=6, plen=4, opcode=2,
164 | src_mac=self.s1_gateway_mac, src_ip='192.168.1.1',
165 | dst_mac=ethernet_src, dst_ip=arp_packet.src_ip)
166 |
167 | p = packet.Packet()
168 | p.add_protocol(e)
169 | p.add_protocol(a)
170 | p.serialize()
171 |
172 | # sending arp response for s1 gateway
173 | outPort = in_port
174 | actions = [datapath.ofproto_parser.OFPActionOutput(outPort, 0)]
175 | out = datapath.ofproto_parser.OFPPacketOut(
176 | datapath=datapath,
177 | buffer_id=0xffffffff,
178 | in_port=datapath.ofproto.OFPP_CONTROLLER,
179 | actions=actions,
180 | data=p.data)
181 | datapath.send_msg(out)
182 |
183 | # verbose iteration of packets
184 | try:
185 | for p in pkt.protocols:
186 | print(p.protocol_name, p)
187 | print("datapath: {} in_port: {}".format(datapath.id, in_port))
188 | except Exception:
189 | pass
190 |
191 | # new
192 | @set_ev_cls([ofp_event.EventOFPStatsReply,
193 | ofp_event.EventOFPDescStatsReply,
194 | ofp_event.EventOFPFlowStatsReply,
195 | ofp_event.EventOFPAggregateStatsReply,
196 | ofp_event.EventOFPTableStatsReply,
197 | ofp_event.EventOFPTableFeaturesStatsReply,
198 | ofp_event.EventOFPPortStatsReply,
199 | ofp_event.EventOFPQueueStatsReply,
200 | ofp_event.EventOFPQueueDescStatsReply,
201 | ofp_event.EventOFPMeterStatsReply,
202 | ofp_event.EventOFPMeterFeaturesStatsReply,
203 | ofp_event.EventOFPMeterConfigStatsReply,
204 | ofp_event.EventOFPGroupStatsReply,
205 | ofp_event.EventOFPGroupFeaturesStatsReply,
206 | ofp_event.EventOFPGroupDescStatsReply,
207 | ofp_event.EventOFPPortDescStatsReply
208 | ], MAIN_DISPATCHER)
209 | def stats_reply_handler(self, ev):
210 | msg = ev.msg
211 | dp = msg.datapath
212 |
213 | if dp.id not in self.waiters:
214 | return
215 | if msg.xid not in self.waiters[dp.id]:
216 | return
217 | lock, msgs = self.waiters[dp.id][msg.xid]
218 | msgs.append(msg)
219 |
220 | flags = 0
221 | if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
222 | flags = dp.ofproto.OFPSF_REPLY_MORE
223 | elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
224 | flags = dp.ofproto.OFPSF_REPLY_MORE
225 | elif dp.ofproto.OFP_VERSION >= ofproto_v1_3.OFP_VERSION:
226 | flags = dp.ofproto.OFPMPF_REPLY_MORE
227 |
228 | if msg.flags & flags:
229 | return
230 | del self.waiters[dp.id][msg.xid]
231 | lock.set()
232 |
233 | @set_ev_cls([ofp_event.EventOFPQueueGetConfigReply,
234 | ofp_event.EventOFPRoleReply,
235 | ], MAIN_DISPATCHER)
236 | def features_reply_handler(self, ev):
237 | msg = ev.msg
238 | dp = msg.datapath
239 |
240 | if dp.id not in self.waiters:
241 | return
242 | if msg.xid not in self.waiters[dp.id]:
243 | return
244 | lock, msgs = self.waiters[dp.id][msg.xid]
245 | msgs.append(msg)
246 |
247 | del self.waiters[dp.id][msg.xid]
248 | lock.set()
249 |
250 |
251 |
252 |
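253 | # The mapper.connect() calls in __init__ expose ofctl_rest's StatsController
254 | # under the /network prefix, and the two *_reply_handler methods above feed
255 | # switch replies back through the shared waiters dict so those REST calls can
256 | # block until the data arrives. Example requests against a locally running
257 | # controller (Ryu's WSGI server listens on port 8080 by default; the dpid and
258 | # flow body below are illustrative only):
259 | #
260 | #   curl http://localhost:8080/network/switches
261 | #   curl http://localhost:8080/network/desc/1
262 | #   curl http://localhost:8080/network/flow/1
263 | #   curl -X POST http://localhost:8080/network/flowentry/add \
264 | #        -d '{"dpid": 1, "priority": 100, "match": {"in_port": 1},
265 | #             "actions": [{"type": "OUTPUT", "port": 2}]}'
266 | #   curl -X POST http://localhost:8080/network/flowentry/delete \
267 | #        -d '{"dpid": 1, "match": {"in_port": 1}}'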
--------------------------------------------------------------------------------
/Chapter07/pygal_example_3.svg:
--------------------------------------------------------------------------------
1 | <!-- pygal SVG markup omitted from this listing -->
--------------------------------------------------------------------------------