├── README.md
├── b-test
│   ├── README.md
│   ├── tcp_btest.py
│   └── tcpdump_btest.py
├── geo-ip-check
│   ├── README.md
│   ├── db_used.txt
│   ├── geo_class.py
│   ├── geo_ip.py
│   └── requirements.txt
├── isp-servis-geo.csv
├── marykirk.csv
├── mk-configs
│   ├── mum_mangle.rsc
│   ├── mum_queue.rsc
│   ├── pcq_mangle.rsc
│   ├── pcq_tree.rsc
│   ├── pcq_types.rsc
│   ├── simple.rsc
│   └── simple_parent.rsc
└── spl-scripts
    ├── net_stats.py
    ├── ppp-mikrotik.py
    ├── splynx_api.py
    └── splynx_stats.py
/README.md:
--------------------------------------------------------------------------------
1 | # net-useful
2 | Useful Splynx, Mikrotik RouterOS and Linux scripts
3 |
4 | BTEST is a TCP bandwidth test for Unix platforms. It analyzes a tcpdump file captured on an interface during a data transfer.
5 | The result shows the bandwidth usage of each single TCP session, its duration and the data transferred, as well as the overall statistics.
6 |
7 | GEO-IP is a tool to check how an IP address (the whole /24 network) is displayed in the top geo IP databases. It also displays the data from the whois database, which is based on the RIR databases.
8 |
9 | MK CONFIGS folder has different Mikrotik configurations for queues.
10 |
11 | SPL SCRIPTS has a few useful scripts that work with the SPLYNX API:
12 | - ppp-mikrotik takes all Internet services from Splynx and exports usernames and passwords to the Mikrotik PPP secrets configuration.
13 | - net_stats.py is a script used to get the usage for a selected period per /24 network. It takes all customer statistics and aggregates them into /24 networks. Useful for designing and playing with BGP load balancing.
14 |
15 | There are also a few files with geolocation data for our IP ranges.
16 |
--------------------------------------------------------------------------------
/b-test/README.md:
--------------------------------------------------------------------------------
1 | To start the program you first need to capture a TCP dump from a certain interface; please use the command
2 | tcpdump -i en0 -e > test5.dump, where en0 is the interface to track
3 |
4 | then start the script: tcpdump_btest.py test5.dump
5 | it will show the overall bandwidth and also the bandwidth/speed of each session
6 |
--------------------------------------------------------------------------------
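A minimal sketch (not part of the repo) of how a single `tcpdump -i en0 -e` line is split into the fields that tcpdump_btest.py builds; the sample line is invented but follows the standard `tcpdump -e` layout:

```python
# Hypothetical tcpdump -e line: "timestamp MACs, ethertype ..., length N: SRC > DST: flags"
line = ("12:00:01.000001 aa:bb:cc:dd:ee:ff > 11:22:33:44:55:66, "
        "ethertype IPv4 (0x0800), length 1514: 10.0.0.1.443 > 10.0.0.2.50000: Flags [.]")

fields = line.split(",")
timestamp = fields[0].strip().split(" ")[0]           # "12:00:01.000001"
tail = fields[2].strip().split(":")                   # ["length 1514", " 10.0.0.1.443 > 10.0.0.2.50000", ...]
frame_size = int(tail[0].split(" ")[1])               # 1514, the full Ethernet frame length
src, dst = [p.strip() for p in tail[1].split(">")]    # "10.0.0.1.443", "10.0.0.2.50000"
session = src + "<->" + dst                           # session key used by tcp_btest.py
print(timestamp, frame_size, session)
```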
/b-test/tcp_btest.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import json
3 | from tabulate import tabulate
4 | from operator import itemgetter
5 |
6 | #Class for getting and sorting all TCP sessions
7 |
8 | class tcp_bandwidth_test:
9 | def __init__(self, data_list):
10 | self.data_list = data_list
11 |
12 | def get_tcp_stats(self):
13 | # Creating a list of unique SRC-DST combinations
14 | unique_combinations = []
15 | for i in self.data_list :
16 | unique_combinations.append(i['session'])
17 | u_combinations = set(unique_combinations)
18 |
19 | #Creating unique combination list where SRC is in first element and DST in the second
20 | session_list = []
21 | for line in u_combinations:
22 | data = line.split('<->')
23 | data_dict = {data[0]:data[1]}
24 | session_list.append(data_dict)
25 |
26 |         #Keep only one direction of each session (one of the SRC-DST / DST-SRC pair)
27 | final_sessions = []
28 |
29 | id=0
30 | for line in session_list:
31 | for key in line:
32 | SRC = key
33 | DST = line[key]
34 | for line1 in session_list:
35 | for key1 in line1:
36 | SRC1 = key1
37 | DST1 = line1[key1]
38 | if DST1 == SRC and SRC1==DST:
39 | final_sessions.append(line1)
40 | session_list[id][key]="USED_IPS"
41 | id = id + 1
42 |
43 | #Sort all stats according to sessions
44 | sorted_stats=[]
45 | for session in final_sessions:
46 | for key in session:
47 | SRC = key
48 | DST = session[key]
49 | for packet in self.data_list:
50 | if packet['src'] == SRC and packet['dst'] == DST:
51 | data = {SRC+" <-> "+DST : packet}
52 | sorted_stats.append(data)
53 | if packet['dst'] == SRC and packet['src'] == DST:
54 | data = {SRC+" <-> "+DST : packet}
55 | sorted_stats.append(data)
56 |
57 | #print(json.dumps(sorted_stats, indent=4))
58 | #print(len(sorted_stats))
59 |
60 |         #Next step is to show, for each session:
61 | #0. Session name
62 | #1. Start time
63 | #2. End time
64 | #3. Amount of packets
65 | #4. Data transferred UP + Down
66 | #5. Speed UP+DOWN of each session
67 |
68 | final_statistics=[]
69 | for session in final_sessions:
70 | for key in session:
71 | name = key+" <-> "+session[key]
72 | time_counter = 0
73 | packet_counter = 0
74 | data_counter = 0
75 |
76 | for stat_line in sorted_stats:
77 | for stat_key in stat_line:
78 | if name==stat_key:
79 | packet_counter=packet_counter+1
80 | data_counter=data_counter+stat_line[stat_key]["frame_size"]
81 | if time_counter==0:
82 | start_time = stat_line[stat_key]["timestamp"]
83 | time_counter=time_counter+1
84 | end_time = stat_line[stat_key]["timestamp"]
85 | src_ip = stat_line[stat_key]["src_ip"]
86 | dst_ip = stat_line[stat_key]["dst_ip"]
87 |
88 | time_delta = datetime.datetime.strptime(end_time, "%H:%M:%S.%f") - datetime.datetime.strptime(start_time,"%H:%M:%S.%f")
89 |                 speed_mbps = round(data_counter*8/1024/1024/max(float(time_delta.total_seconds()), 1e-9),3)  # max() guards against zero-duration single-packet sessions
90 | data = {'name': name, 'start_time': start_time, 'session_duration' : float(time_delta.total_seconds()), 'amount_packets' : packet_counter, 'data_transferred' : round(data_counter/1024/1024,6), 'speed' : str(speed_mbps)+" Mbps"}
91 | final_statistics.append(data)
92 |
93 |
94 | final_statistics.sort(key=itemgetter('amount_packets'),reverse=True)
95 | print(tabulate(final_statistics, headers='keys', tablefmt="grid"))
96 |
97 | dump_start_time = self.data_list[0]["timestamp"]
98 | dump_end_time = self.data_list[-1]["timestamp"]
99 | dump_time_delta = datetime.datetime.strptime(dump_end_time,"%H:%M:%S.%f") - datetime.datetime.strptime(dump_start_time,"%H:%M:%S.%f")
100 |
101 | print("Total duration of dump : " + str(float(dump_time_delta.total_seconds())) + " seconds")
102 |
103 | print("Amount of TCP sessions " + str(len(final_statistics)))
104 |
105 | packets=0
106 | for paq in final_statistics:
107 | packets = packets + paq['amount_packets']
108 |
109 | print("Amount of packets " + str(packets))
110 |
111 | all_data=0
112 | for line in final_statistics:
113 | all_data = all_data + line['data_transferred']
114 |
115 | print("Data transferred : " + str(all_data) + " MB")
116 | av_speed = round(all_data*8/float(dump_time_delta.total_seconds()),3)
117 |
118 | print("Average speed is " + str(av_speed) + " Mbps")
119 |
--------------------------------------------------------------------------------
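The SRC-DST / DST-SRC pairing in get_tcp_stats() above scans session_list with nested loops, which is quadratic in the number of sessions. A hedged alternative sketch (not what the script does) that groups both directions of a session under one order-independent key in a single pass:

```python
from collections import defaultdict

def canonical_sessions(data_list):
    """Group packets of both directions under one direction-insensitive key."""
    sessions = defaultdict(list)
    for packet in data_list:
        key = frozenset((packet['src'], packet['dst']))  # A<->B equals B<->A
        sessions[key].append(packet)
    return sessions

packets = [{'src': '10.0.0.1:443', 'dst': '10.0.0.2:50000'},
           {'src': '10.0.0.2:50000', 'dst': '10.0.0.1:443'}]
print(len(canonical_sessions(packets)))  # 1 -- both directions fall into one session
```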
/b-test/tcpdump_btest.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import json
3 | from tabulate import tabulate
4 | from operator import itemgetter
5 | import tcp_btest as b_test
6 | from sys import argv
7 | import time
8 |
9 | start_time = time.time()
10 |
11 | file_name = argv[1]
12 | with open(file_name, 'r') as f:
13 | tcp_dump = f.read().rstrip().split('\n')
14 |
15 | t_rows=[]
16 | for i in tcp_dump:
17 | data = i.split(",")
18 | t_rows.append(data)
19 | #print(json.dumps(t_rows,indent=4))
20 |
21 | #data = {'timestamp': str(datetime.datetime.utcfromtimestamp(ts)), 'src_ip': inet_to_str(ip.src),
22 | # 'dst_ip': inet_to_str(ip.dst), 'src': inet_to_str(ip.src) + ":" + str(src_port),
23 | # 'dst': inet_to_str(ip.dst) + ":" + str(dst_port), 'frame_size': len(eth),
24 | #         'session': inet_to_str(ip.src) + ":" + str(src_port) + '<->' + inet_to_str(ip.dst) + ":" + str(dst_port)}
25 |
26 | data_list=[]
27 | tcp_rows=[]
28 | for a in t_rows:
29 |     for idx, item in enumerate(a):  # enumerate: a.index(item) returns the first match, which misfires on duplicate fields
30 |         if idx == 0:
31 |             first_element = item.strip().split(' ')
32 |             data = {'timestamp': (first_element[0])}
33 |
34 |         if idx == 2:
35 | element = item.strip().split(':')
36 | length = element[0].split(' ')
37 | ip_addresses = element[1].strip().split('>')
38 | if len(ip_addresses)>1:
39 | src = ip_addresses[0].strip()
40 | dst = ip_addresses[1].strip()
41 | ip_no_port1 = src.split('.')
42 | ip_no_port2 = dst.split('.')
43 | src_ip = '.'.join(ip_no_port1[:-1])
44 | dst_ip = '.'.join(ip_no_port2[:-1])
45 |
46 | data1 = {'src_ip': src_ip, 'dst_ip': dst_ip, 'src' : src, 'dst' : dst, 'frame_size': (int(length[1])), 'session': src + '<->' + dst}
47 | data.update(data1)
48 | data_list.append(data)
49 |
50 | else :
51 | data.clear()
52 |
53 |
54 |
55 | #print(json.dumps(data_list, indent=2))
56 |
57 | my_test = b_test.tcp_bandwidth_test(data_list)
58 | my_test.get_tcp_stats()
59 |
60 | print("--- %s seconds ---" % (time.time() - start_time))
61 |
--------------------------------------------------------------------------------
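For reference, the per-session speed printed by tcp_btest.py follows bytes * 8 / 1024 / 1024 / seconds, i.e. mebibit-based Mbps. A small worked example with invented numbers:

```python
data_bytes = 13107200    # 12.5 MiB transferred (hypothetical)
seconds = 10.0
speed_mbps = round(data_bytes * 8 / 1024 / 1024 / seconds, 3)
print(speed_mbps, "Mbps")  # 10.0 Mbps
```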
/geo-ip-check/README.md:
--------------------------------------------------------------------------------
1 | Script that allows checking IP address information in the most used GEO IP databases
2 |
3 | geo_ip class : connects to the RIR database to check the latest info on the IP address and the network.
4 | There is a separate class for the Maxmind connection and a class for using the REST APIs of 6 other widely used geo IP databases
5 |
6 | please use pip install geoip2 to get the Maxmind module that is used to connect to its web service
7 | also IPWhois is used to get data from RIPE or other RIR databases (pip install ipwhois)
8 |
9 | Script geo_ip.py should be run with 1 or 2 parameters :
10 | ./geo_ip.py 10.0.0.1 nocache
11 |
12 | or just
13 | ./geo_ip.py 10.0.0.1
14 |
--------------------------------------------------------------------------------
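A minimal usage sketch of the classes described above; the API key value is a placeholder and the calls hit the live services:

```python
import geo_class as ip

checker = ip.geo_data('8.8.8.8', 'ipdata', 'YOUR_IPDATA_KEY', 'nocache')
info = checker.get_info()          # dict: database, code, country, region, city, isp, ...
print(info['country'], info['isp'])

rir = ip.rir_data('8.8.8.8')
print(rir.get_info()['network'])   # CIDR as registered in the RIR database
```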
/geo-ip-check/db_used.txt:
--------------------------------------------------------------------------------
1 | IP2Location : https://www.ip2location.com/dashboard
2 | IPgeolocation.io : https://app.ipgeolocation.io
3 | AbstractAPI : https://app.abstractapi.com/#/
4 | IPregistry.com : https://dashboard.ipregistry.co/overview
5 | IPdata.co : https://dashboard.ipdata.co
6 | AstroIP: https://astroip.co/dashboard
7 | Maxmind : https://maxmind.com/ - geoip2.webservice python module used
8 | RIPE, APNIC, ARIN, AFRINIC, LACNIC: IPwhois python module
9 |
10 |
11 | Logic description
12 | Script geo_ip.py should be run with 1 or 2 parameters :
13 | ./geo_ip.py 10.0.0.1 nocache
14 |
15 | or just
16 | ./geo_ip.py 10.0.0.1
17 |
18 | 1. A request is sent to the RIR DB (for example RIPE) to get the detailed and latest assignment information.
19 | Output shows combined information from the Inetnum and Org objects.
20 | 2. A second request checks the Maxmind database
21 | 3. The range is split into /24 prefixes and all other DBs also store cache files, so their REST APIs are not called every time (Maxmind and the RIR are always checked)
22 | 4. The API queries are sent to all databases and the results are shown
23 |
--------------------------------------------------------------------------------
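A sketch of the cache layout implied by point 3: one JSON file per service per /24 network (example IP invented):

```python
service = 'ipdata'
ip_address = '203.0.113.77'
net = '.'.join(ip_address.split('.')[:3] + ['0'])    # '203.0.113.0'
cache_file = './cache/%s_%s.json' % (service, net)   # './cache/ipdata_203.0.113.0.json'
print(cache_file)
```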
/geo-ip-check/geo_class.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import os
3 | import json
4 | from ipwhois import IPWhois
5 | import geoip2.webservice
6 |
7 | class parent_data:
8 | def __init__(self, ip_address):
9 | self.ip_address = ip_address
10 |
11 | '''
12 | Method ip_check() is used to validate the IP address
13 | '''
14 | def ip_check(self):
15 | ip_address = self.ip_address
16 | ip = ip_address.split(".")
17 |
18 | if (ip.__len__()) != 4:
19 |             print("You haven't entered a valid IP address in dotted notation")
20 | self.ip_address = input("Please enter a valid IP address : ")
21 | ip_address = self.ip_check()
22 | else:
23 | try:
24 | for i in ip:
25 | if (int(i) < 0) or (int(i) > 255):
26 | self.ip_address = input("Please enter valid IP address : ")
27 | ip_address = self.ip_check()
28 | break
29 | except:
30 |                 print("Sorry, an IP address has 4 fields with values 0-255 separated by dots, ")
31 | self.ip_address = input("please enter a valid IP address : ")
32 | ip_address = self.ip_check()
33 | return (ip_address)
34 |
35 | '''
36 | Method find_net() is used to get the address of the /24 network that the IP belongs to, which is then used for caching purposes
37 | '''
38 |
39 | def find_net(self):
40 | ip = self.ip_address.split(".")
41 | net = [ip[0], ip[1], ip[2], "0"]
42 | net_address = ".".join(net)
43 | return (net_address)
44 |
45 | class geo_data(parent_data):
46 | def __init__(self, ip_address, service, apiKey, cache):
47 | super().__init__(ip_address)
48 | self.apiKey = apiKey
49 | self.service = service
50 | self.cache=cache
51 |
52 |
53 | '''
54 | Method connect_api() connects to the REST API of the GEO IP database and also saves a json file to the cache folder to allow accessing it later.
55 | The file is created for the whole /24 network
56 | '''
57 |
58 | def connect_api(self):
59 | net_ip_address = self.find_net()
60 |
61 | if self.service == 'ip2location':
62 | self.url='https://api.ip2location.com/v2/'
63 |
64 | elif self.service == 'ipregistry':
65 | self.url = 'https://api.ipregistry.co/'+self.ip_address
66 |
67 | elif self.service == 'ipgeolocation':
68 | self.url = 'https://api.ipgeolocation.io/ipgeo/'
69 |
70 | elif self.service == 'abstractapi':
71 | self.url = 'https://ipgeolocation.abstractapi.com/v1/'
72 |
73 | elif self.service == 'ipdata':
74 | self.url = 'https://api.ipdata.co/'+self.ip_address
75 |
76 | elif self.service == 'astroip':
77 | self.url = 'https://api.astroip.co/'+self.ip_address
78 |
79 | try:
80 | url = self.url
81 |
82 | if self.service == 'ip2location':
83 | params = dict(key=self.apiKey, ip=self.ip_address, package='WS6', lang='en')
84 |
85 | elif self.service == 'ipgeolocation':
86 | params = dict(apiKey=self.apiKey, ip=self.ip_address, excludes='time_zone,currency')
87 |
88 | elif self.service == 'ipregistry':
89 | params = dict(key=self.apiKey)
90 |
91 | elif self.service == 'abstractapi':
92 | params = dict(api_key=self.apiKey, ip_address=self.ip_address)
93 |
94 | elif self.service == 'ipdata':
95 | params = {'api-key' : self.apiKey}
96 |
97 | elif self.service == 'astroip':
98 | params = dict(api_key=self.apiKey)
99 |
100 | getInfo = requests.get(url=url, params=params)
101 | ipInfo = getInfo.json()
102 | # This is the json received as an answer from selected GEO IP database REST API
103 | # print(json.dumps(ipInfo, indent=2))
104 |
105 |             # Create a file inside the /cache/ folder to store the REST API answer; the whole json content is saved
106 | directory = './cache/'
107 | filename = str(self.service + '_' + net_ip_address + '.json')
108 | file_path = os.path.join(directory, filename)
109 | if not os.path.isdir(directory):
110 | os.mkdir(directory)
111 | with open(file_path, 'w') as f:
112 | f.write(json.dumps(ipInfo, indent=2))
113 | return (ipInfo)
114 | #
115 |
116 |
117 |         except Exception:
118 |             print("Cannot connect to " + self.service + " API")
119 |
120 |
121 | '''
122 | Method get_info() attempts to read the cache file first, if it doesn't exist, it calls connect_api() method and connects to REST API + saves the local cache file
123 | and returns the dictionary with content of the GEO IP data
124 | '''
125 |
126 | def get_info(self) :
127 | net_ip_address = self.find_net()
128 |         ip_addr = {}
129 |
130 | # If the nocache attribute is not set, then always connect to the /cache/ folder and try to get data from cache file
131 | if self.cache != 'nocache':
132 | try:
133 | directory = './cache/'
134 | filename = self.service + '_' + net_ip_address + '.json'
135 | file_path = os.path.join(directory, filename)
136 |
137 | with open(file_path, 'r') as f:
138 | ip_json=f.read()
139 |
140 | ip_addr = json.loads(ip_json)
141 |
142 | # if the file doesn't exist, run the connect_api method
143 | except IOError:
144 | ip_addr = self.connect_api()
145 | # if cache == nocache then also connect to the api without using a cache
146 | else :
147 | ip_addr = self.connect_api()
148 |
149 | if self.service == 'ip2location':
150 | geo_info = {'database': 'ip2location.com',
151 | 'code': ip_addr["country_code"],
152 | 'country': ip_addr["country_name"],
153 | 'region': ip_addr["region_name"],
154 | 'city': ip_addr["city_name"],
155 | 'company': '',
156 | 'isp': ip_addr["isp"],
157 | 'latitude': ip_addr["latitude"],
158 | 'longitude': ip_addr["longitude"]}
159 |
160 | elif self.service == 'ipgeolocation':
161 | geo_info = {'database': 'ipgeolocation.io',
162 | 'code': ip_addr["country_code2"],
163 | 'country': ip_addr["country_name"],
164 | 'region': ip_addr["state_prov"],
165 | 'city': ip_addr["city"],
166 | 'company': ip_addr["organization"],
167 | 'isp': ip_addr["isp"],
168 | 'latitude': ip_addr["latitude"],
169 | 'longitude': ip_addr["longitude"]}
170 |
171 | elif self.service == 'ipregistry':
172 | geo_info = {'database': 'ipregistry.co',
173 | 'code': ip_addr["location"]["country"]["code"],
174 | 'country': ip_addr["location"]["country"]["name"],
175 | 'region': ip_addr["location"]["region"]["name"],
176 | 'city': ip_addr["location"]["city"],
177 | 'company': '',
178 | 'isp': ip_addr["connection"]["organization"],
179 | 'latitude': ip_addr["location"]["latitude"],
180 | 'longitude': ip_addr["location"]["longitude"]}
181 |
182 | elif self.service == 'abstractapi':
183 | geo_info = {'database': 'abstractapi.com',
184 | 'code': ip_addr["country_code"],
185 | 'country': ip_addr["country"],
186 | 'region': ip_addr["region"],
187 | 'city': ip_addr["city"],
188 | 'company': ip_addr["connection"]["organization_name"],
189 | 'isp': ip_addr["connection"]["isp_name"],
190 | 'latitude': ip_addr["latitude"],
191 | 'longitude': ip_addr["longitude"]}
192 |
193 | elif self.service == 'ipdata':
194 | geo_info = {'database': 'ipdata.co',
195 | 'code': ip_addr["country_code"],
196 | 'country': ip_addr["country_name"],
197 | 'region': ip_addr["region"],
198 | 'city': ip_addr["city"],
199 | 'company': '',
200 | 'isp': ip_addr["asn"]["name"] if ("asn" in ip_addr) else '',
201 | 'latitude': ip_addr["latitude"] if ("latitude" in ip_addr) else '',
202 | 'longitude': ip_addr["longitude"] if ("longitude" in ip_addr) else ''
203 | }
204 |
205 | elif self.service == 'astroip':
206 | geo_info = {'database': 'astroip.co',
207 | 'code': ip_addr["geo"]["country_code"],
208 | 'country': ip_addr["geo"]["country_name"],
209 | 'region': ip_addr["geo"]["region_name"],
210 | 'city': ip_addr["geo"]["city"],
211 | 'company': ip_addr["asn"]["organization"] if (ip_addr["asn"] is not None) else '',
212 | 'isp': ip_addr["asn"]["asn"] if (ip_addr["asn"] is not None) else '',
213 | 'latitude': ip_addr["geo"]["latitude"],
214 | 'longitude': ip_addr["geo"]["longitude"]}
215 |
216 |
217 |
218 | return (geo_info)
219 |
220 |
221 | class maxmind_data(parent_data):
222 | def __init__(self, ip_address, user, code):
223 | super().__init__(ip_address)
224 | self.user = user
225 | self.code = code
226 |
227 | def get_info(self) :
228 | self.ip_address = self.ip_check()
229 | maxmind_webservice = geoip2.webservice.Client(self.user, self.code)
230 | maxmind_json = maxmind_webservice.insights(self.ip_address)
231 |
232 | try :
233 | m_region = maxmind_json.subdivisions[0].names["en"]
234 | except :
235 | m_region = ''
236 |
237 | try :
238 | m_city = maxmind_json.subdivisions[1].names["en"]
239 | except :
240 | m_city = ''
241 |
242 | geo_info = {'network': maxmind_json.traits._network,
243 | 'database' : "Maxmind",
244 | 'code': maxmind_json.country.iso_code,
245 | 'country': maxmind_json.country.name,
246 | 'region': m_region,
247 | 'city': m_city,
248 | 'company': maxmind_json.traits.organization,
249 | 'isp': maxmind_json.traits.isp,
250 | 'latitude': maxmind_json.location.latitude,
251 | 'longitude': maxmind_json.location.longitude
252 | }
253 |
254 | return geo_info
255 |
256 | class rir_data(parent_data):
257 | def __init__(self, ip_address):
258 | super().__init__(ip_address)
259 |
260 | def get_info(self):
261 | obj = IPWhois(self.ip_address)
262 | ip_rir = obj.lookup_rdap(depth=2)
263 |
264 | # the lookup returns back json object, but the ORG is quite deep inside, so need to get it from the entities dictionary and then use for parsing
265 | entities = ip_rir["entities"]
266 | #print(entities)
267 | org_name = ''
268 | for i in entities :
269 | if i[0:3] == "ORG":
270 | org_name = i
271 |
272 | rir_info = { 'network' : ip_rir["network"]["cidr"],
273 | 'parent_network' : ip_rir["network"]["parent_handle"],
274 | 'database': ip_rir["asn_registry"],
275 | 'network_name': ip_rir["network"]["name"],
276 | 'code': ip_rir["network"]["country"],
277 | 'type': ip_rir["network"]["type"],
278 | 'org': org_name if (org_name != '') else '',
279 | 'company': ip_rir["objects"][org_name]["contact"]["name"] if (org_name != '') else '',
280 | 'address': ip_rir["objects"][org_name]["contact"]["address"][0]["value"] if (org_name!='') else '',
281 | 'asn' : ip_rir["asn"],
282 | 'asn_country': ip_rir["asn_country_code"],
283 | 'updated' : ip_rir["network"]["events"][0]["timestamp"]
284 | }
285 |
286 | return rir_info
287 |
288 |
--------------------------------------------------------------------------------
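The hand-rolled ip_check() above can also be expressed with the standard-library ipaddress module; a sketch of the idea, not what geo_class uses:

```python
import ipaddress

def is_valid_ipv4(value):
    try:
        ipaddress.IPv4Address(value)   # raises ValueError on bad input
        return True
    except ValueError:
        return False

print(is_valid_ipv4('10.0.0.1'), is_valid_ipv4('10.0.0.256'))  # True False
```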
/geo-ip-check/geo_ip.py:
--------------------------------------------------------------------------------
1 | from sys import argv
2 | import time
3 | import geo_class as ip
4 | import geoip2.webservice
5 |
6 | start_time = time.time()
7 | data = []
8 | ip2location_apiKey = ""
9 | ipregistry_apiKey = ""
10 | ipgeolocation_apiKey = ""
11 | abstractapi_apiKey = ""
12 | ipdata_apiKey = ""
13 | astroip_apiKey = ""
14 | # below is the user account ID on the Maxmind web service
15 | maxmind_user = 0  # please set your Maxmind account ID
16 | maxmind_code = ""
17 |
18 | ip_argv = argv[1]
19 | cache = 'yes'
20 | try :
21 | cache = argv[2]
22 | except:
23 | pass
24 |
25 | ip_object = ip.parent_data(ip_argv)
26 | ip_address = ip_object.ip_check()
27 |
28 | rir_location = ip.rir_data(ip_address)
29 | rir_data = rir_location.get_info()
30 |
31 | maxmind = ip.maxmind_data(ip_address, maxmind_user, maxmind_code)
32 | ip2location = ip.geo_data(ip_address, 'ip2location', ip2location_apiKey, cache)
33 | ipgeolocation = ip.geo_data(ip_address, 'ipgeolocation', ipgeolocation_apiKey, cache)
34 | ipregistry = ip.geo_data(ip_address, 'ipregistry', ipregistry_apiKey, cache)
35 | abstractapi = ip.geo_data(ip_address, 'abstractapi', abstractapi_apiKey, cache)
36 | ipdata = ip.geo_data(ip_address, 'ipdata', ipdata_apiKey, cache)
37 | astroip = ip.geo_data(ip_address, 'astroip', astroip_apiKey, cache)
38 |
39 | data.append(maxmind.get_info())
40 | data.append(ip2location.get_info())
41 | data.append(ipgeolocation.get_info())
42 | data.append(ipregistry.get_info())
43 | data.append(abstractapi.get_info())
44 | data.append(ipdata.get_info())
45 | data.append(astroip.get_info())
46 |
47 |
48 | print(f'''
49 | ------ {rir_data["database"].upper()} ------
50 | IP address : {rir_location.ip_address}
51 | Network : {rir_data["network"]}
52 | Type: {rir_data["type"]}
53 | Parent network : {rir_data["parent_network"]}
54 | Country code : {rir_data["code"]}
55 | Org object : {rir_data["org"]}
56 | Company : {rir_data["company"]}
57 | Address : {rir_data["address"]}
58 | ASN : AS{rir_data["asn"]}
59 | ASN Country : {rir_data["asn_country"]}
60 | Last updated : {rir_data["updated"]}
61 | '''
62 | )
63 |
64 |
65 | for i in range(len(data)):
66 | print(f'''
67 | ------ {data[i]["database"].capitalize()} ------
68 | IP address : {ip2location.ip_address}
69 | Database : {data[i]["database"]}
70 | Code : {data[i]["code"]}
71 | Country : {data[i]["country"]}
72 | Region : {data[i]["region"]}
73 | City : {data[i]["city"]}
74 | ISP: {data[i]["isp"]}
75 | Company: {data[i]["company"]}
76 | Geolocation: {data[i]["latitude"]}, {data[i]["longitude"]}
77 | '''
78 |
79 | )
80 |
81 | print("--- %s seconds ---" % (time.time() - start_time))
82 |
--------------------------------------------------------------------------------
/geo-ip-check/requirements.txt:
--------------------------------------------------------------------------------
1 | geoip2
2 | tabulate
3 | ipwhois
4 | IP2Location
5 | requests
6 |
--------------------------------------------------------------------------------
/isp-servis-geo.csv:
--------------------------------------------------------------------------------
1 | # prefix,country_code,region_code,city,postal
2 | 194.26.19.0/24,GB,GB-ESS,Marykirk,AB301UT
3 | 45.140.40.0/24,GR,,Magnesia,45221
4 | 91.230.247.0/24,GR,,Lykourgou Chalandri,15231
5 | 176.97.196.0/24,GR,,Lykourgou Chalandri,15231
6 | 5.59.234.0/23,ES,,Barcelona,08530
7 | 185.30.162.0/23,ES,,Lloret de Mar,17310
8 | 45.88.156.0/23,ES,,Lloret de Mar,17310
9 | 109.205.241.0/24,CZ,,Revnice,25230
10 | 188.95.93.0/24,UA,,Dnipro,52071
11 | 91.223.194.0/24,UA,,Lviv,79003
12 | 194.26.226.0/24,RU,,Rostov,344048
13 |
--------------------------------------------------------------------------------
/marykirk.csv:
--------------------------------------------------------------------------------
1 | # prefix,country_code,region_code,city,postal
2 | 194.26.19.0/24,GB,GB-ESS,Marykirk,AB301UT
3 |
--------------------------------------------------------------------------------
/mk-configs/mum_mangle.rsc:
--------------------------------------------------------------------------------
1 | /ip firewall mangle
2 | add action=mark-connection chain=prerouting comment=ICMP connection-mark=no-mark new-connection-mark=icmp \
3 | passthrough=yes protocol=icmp
4 | add action=mark-packet chain=prerouting connection-mark=icmp new-packet-mark=icmp passthrough=no
5 | add action=mark-connection chain=prerouting comment=DNS connection-mark=!dns dst-port=53 log-prefix=DNS \
6 | new-connection-mark=dns passthrough=yes protocol=udp
7 | add action=mark-connection chain=output connection-mark=!dns dst-port=53 log-prefix=DNS new-connection-mark=\
8 | dns passthrough=yes protocol=udp
9 | add action=mark-packet chain=prerouting connection-mark=dns new-packet-mark=dns passthrough=no
10 | add action=mark-packet chain=output connection-mark=dns new-packet-mark=dns passthrough=no
11 | add action=mark-connection chain=input comment=WINBOX dst-port=8291 new-connection-mark=winbox passthrough=\
12 | yes protocol=tcp
13 | add action=mark-connection chain=output new-connection-mark=winbox passthrough=yes protocol=tcp src-port=8291
14 | add action=mark-packet chain=input connection-mark=winbox new-packet-mark=winbox passthrough=no
15 | add action=mark-packet chain=output connection-mark=winbox new-packet-mark=winbox passthrough=no
16 | add action=mark-connection chain=prerouting comment=BWTEST dst-address=100.100.100.100 new-connection-mark=\
17 | bwtest passthrough=yes
18 | add action=mark-packet chain=prerouting connection-mark=bwtest new-packet-mark=bwtest passthrough=no
19 | add action=mark-connection chain=prerouting comment=SpeedTest content=speedtest.net new-connection-mark=\
20 | speedtest.net passthrough=yes protocol=tcp
21 | add action=add-dst-to-address-list address-list=speedtest.net address-list-timeout=1h chain=prerouting \
22 | comment=SpeedTest disabled=yes dst-address-list=!NET-Privadas dst-port=8081,8080 protocol=tcp
23 | add action=mark-packet chain=prerouting connection-mark=speedtest.net new-packet-mark=speedtest.net \
24 | passthrough=no
25 | add action=mark-connection chain=prerouting comment=NetFLIX dst-address-list=Netflix new-connection-mark=\
26 | Netflix passthrough=yes
27 | add action=mark-packet chain=prerouting connection-mark=Netflix new-packet-mark=Netflix passthrough=no
28 | add action=mark-connection chain=prerouting comment=VoIP dst-port=5060-5070 new-connection-mark=sip \
29 | passthrough=yes protocol=udp
30 | add action=mark-packet chain=prerouting connection-mark=sip new-packet-mark=sip passthrough=no
31 | add action=mark-connection chain=prerouting comment=SSL connection-mark=!ssl dst-port=443 log-prefix=SSL \
32 | new-connection-mark=ssl passthrough=yes protocol=tcp
33 | add action=mark-connection chain=prerouting comment=SSL connection-mark=!ssl dst-port=22 log-prefix=SSL \
34 | new-connection-mark=ssl passthrough=yes protocol=tcp
35 | add action=mark-connection chain=prerouting comment=SSL connection-mark=!ssl dst-port=443 log-prefix=SSL \
36 | new-connection-mark=ssl passthrough=yes protocol=udp
37 | add action=mark-packet chain=prerouting connection-mark=ssl new-packet-mark=ssl passthrough=no
38 | add action=mark-connection chain=prerouting comment=WWW connection-mark=!www dst-port=80,5222-5228 \
39 | new-connection-mark=www passthrough=yes protocol=tcp
40 | add action=mark-packet chain=prerouting connection-mark=www new-packet-mark=www passthrough=no
41 | add action=mark-connection chain=prerouting comment=MAIL connection-mark=!mail dst-port=\
42 | 25,110,993,143,465,587 log-prefix=Mail--- new-connection-mark=mail passthrough=yes protocol=tcp
43 | add action=mark-packet chain=prerouting connection-mark=mail new-packet-mark=mail passthrough=no
44 | add action=mark-connection chain=prerouting comment=OTHER connection-mark=no-mark new-connection-mark=\
45 | resto_trafico passthrough=yes
46 | add action=mark-packet chain=prerouting connection-mark=resto_trafico disabled=yes new-packet-mark=\
47 | resto_trafico passthrough=no
48 |
--------------------------------------------------------------------------------
/mk-configs/mum_queue.rsc:
--------------------------------------------------------------------------------
1 | /queue tree
2 | add max-limit=10M name=#ROOT parent=global queue=synchronous-default
3 | add limit-at=512k max-limit=1M name=icmp packet-mark=icmp parent=#ROOT priority=1
4 | add limit-at=256k max-limit=512k name=DNS packet-mark=dns parent=#ROOT priority=1
5 | add limit-at=512k max-limit=2M name=VoIP packet-mark=sip parent=#ROOT priority=3
6 | add limit-at=1M max-limit=10M name=SSL packet-mark=ssl parent=#ROOT priority=1 queue=synchronous-default
7 | add limit-at=1M max-limit=10M name=WWW packet-mark=www parent=#ROOT priority=6 queue=default
8 | add max-limit=5M name=MAIL packet-mark=mail parent=#ROOT
9 | add max-limit=10M name=OTHER packet-mark=no-mark parent=#ROOT
10 | add limit-at=256k max-limit=1M name=WINBOX packet-mark=winbox parent=#ROOT priority=2
11 | add limit-at=1M max-limit=10M name=SpeedTest packet-mark=speedtest.net parent=#ROOT priority=4
12 | add limit-at=2M max-limit=5M name=NetFLIX packet-mark=Netflix parent=#ROOT priority=4 queue=default
13 | add limit-at=700k max-limit=1M name=BWTEST packet-mark=bwtest parent=#ROOT priority=1 queue=default
--------------------------------------------------------------------------------
/mk-configs/pcq_mangle.rsc:
--------------------------------------------------------------------------------
1 | # apr/05/2018 14:20:02 by RouterOS 6.41.3
2 | # software id =
3 | #
4 | #
5 | #
6 | /ip firewall mangle
7 | add action=mark-connection chain=prerouting comment=TEST_CUSTOMER \
8 | new-connection-mark=TEST_CUSTOMER passthrough=yes src-address=10.0.0.25
9 | add action=mark-packet chain=prerouting connection-mark=TEST_CUSTOMER \
10 | new-packet-mark=TEST_CUSTOMER passthrough=no
11 | add action=mark-connection chain=prerouting comment=TEST_CUSTOMER \
12 | dst-address=10.0.0.25 new-connection-mark=TEST_CUSTOMER_IN passthrough=\
13 | yes
14 | add action=mark-packet chain=prerouting connection-mark=TEST_CUSTOMER_IN \
15 | new-packet-mark=TEST_CUSTOMER_IN passthrough=no
16 | add chain=input comment="Splynx start mangle" disabled=yes src-address=\
17 | 0.0.0.0
18 | add action=mark-packet chain=postrouting comment=SpLPGMark_1-in \
19 | dst-address-list=SpLAL_1 new-packet-mark=SpLPGMark_1-in passthrough=no
20 | add action=mark-packet chain=forward comment=SpLPGMark_1-out new-packet-mark=\
21 | SpLPGMark_1-out passthrough=no src-address-list=SpLAL_1
22 | add action=mark-packet chain=postrouting comment=SpLPGMark_2-in \
23 | dst-address-list=SpLAL_2 new-packet-mark=SpLPGMark_2-in passthrough=no
24 | add action=mark-packet chain=forward comment=SpLPGMark_2-out new-packet-mark=\
25 | SpLPGMark_2-out passthrough=no src-address-list=SpLAL_2
26 | add action=mark-packet chain=postrouting comment=SpLPGMark_3-in \
27 | dst-address-list=SpLAL_3 new-packet-mark=SpLPGMark_3-in passthrough=no
28 | add action=mark-packet chain=forward comment=SpLPGMark_3-out new-packet-mark=\
29 | SpLPGMark_3-out passthrough=no src-address-list=SpLAL_3
30 | add action=mark-packet chain=postrouting comment=SpLPGMark_4-in \
31 | dst-address-list=SpLAL_4 new-packet-mark=SpLPGMark_4-in passthrough=no
32 | add action=mark-packet chain=forward comment=SpLPGMark_4-out new-packet-mark=\
33 | SpLPGMark_4-out passthrough=no src-address-list=SpLAL_4
34 | add action=mark-packet chain=postrouting comment=SpLPGMark_5-in \
35 | dst-address-list=SpLAL_5 new-packet-mark=SpLPGMark_5-in passthrough=no
36 | add action=mark-packet chain=forward comment=SpLPGMark_5-out new-packet-mark=\
37 | SpLPGMark_5-out passthrough=no src-address-list=SpLAL_5
38 | add action=mark-packet chain=postrouting comment=SpLPGMark_6-in \
39 | dst-address-list=SpLAL_6 new-packet-mark=SpLPGMark_6-in passthrough=no
40 | add action=mark-packet chain=forward comment=SpLPGMark_6-out new-packet-mark=\
41 | SpLPGMark_6-out passthrough=no src-address-list=SpLAL_6
42 | add chain=input comment="Splynx end mangle" disabled=yes src-address=0.0.0.0
43 |
--------------------------------------------------------------------------------
/mk-configs/pcq_tree.rsc:
--------------------------------------------------------------------------------
1 | # apr/05/2018 14:20:17 by RouterOS 6.41.3
2 | # software id =
3 | #
4 | #
5 | #
6 | /queue tree
7 | add limit-at=512 max-limit=5120 name=TEST_CUSTOMER_IN parent=global queue=\
8 | default
9 | add limit-at=256 max-limit=1024 name=TEST_CUSTOMER_OUT parent=global queue=\
10 | default
11 | add name=SpLMain_UP parent=global priority=5 queue=default
12 | add name=SpLMain_DOWN parent=global priority=5 queue=default
13 | add burst-time=10s limit-at=205k max-limit=2048k name=SpLPGroup_1-DOWN \
14 | packet-mark=SpLPGMark_1-in parent=SpLMain_DOWN priority=5 queue=\
15 | SpLPCQGroup_1-DOWN
16 | add burst-time=10s limit-at=51k max-limit=512k name=SpLPGroup_1-UP \
17 | packet-mark=SpLPGMark_1-out parent=SpLMain_UP priority=5 queue=\
18 | SpLPCQGroup_1-UP
19 | add burst-time=10s limit-at=500k max-limit=5M name=SpLPGroup_2-DOWN \
20 | packet-mark=SpLPGMark_2-in parent=SpLMain_DOWN priority=5 queue=\
21 | SpLPCQGroup_2-DOWN
22 | add burst-time=10s limit-at=102k max-limit=1024k name=SpLPGroup_2-UP \
23 | packet-mark=SpLPGMark_2-out parent=SpLMain_UP priority=5 queue=\
24 | SpLPCQGroup_2-UP
25 | add burst-time=10s limit-at=1024k max-limit=10240k name=SpLPGroup_3-DOWN \
26 | packet-mark=SpLPGMark_3-in parent=SpLMain_DOWN priority=5 queue=\
27 | SpLPCQGroup_3-DOWN
28 | add burst-time=10s limit-at=205k max-limit=2048k name=SpLPGroup_3-UP \
29 | packet-mark=SpLPGMark_3-out parent=SpLMain_UP priority=5 queue=\
30 | SpLPCQGroup_3-UP
31 | add burst-time=10s limit-at=1557k max-limit=15576k name=SpLPGroup_4-DOWN \
32 | packet-mark=SpLPGMark_4-in parent=SpLMain_DOWN priority=1 queue=\
33 | SpLPCQGroup_4-DOWN
34 | add burst-time=10s limit-at=615k max-limit=6144k name=SpLPGroup_4-UP \
35 | packet-mark=SpLPGMark_4-out parent=SpLMain_UP priority=1 queue=\
36 | SpLPCQGroup_4-UP
37 | add burst-time=10s limit-at=4096k max-limit=40960k name=SpLPGroup_5-DOWN \
38 | packet-mark=SpLPGMark_5-in parent=SpLMain_DOWN priority=1 queue=\
39 | SpLPCQGroup_5-DOWN
40 | add burst-time=10s limit-at=820k max-limit=8192k name=SpLPGroup_5-UP \
41 | packet-mark=SpLPGMark_5-out parent=SpLMain_UP priority=1 queue=\
42 | SpLPCQGroup_5-UP
43 | add burst-time=10s limit-at=205k max-limit=2048k name=SpLPGroup_6-DOWN \
44 | packet-mark=SpLPGMark_6-in parent=SpLMain_DOWN priority=5 queue=\
45 | SpLPCQGroup_6-DOWN
46 | add burst-time=10s limit-at=205k max-limit=2048k name=SpLPGroup_6-UP \
47 | packet-mark=SpLPGMark_6-out parent=SpLMain_UP priority=5 queue=\
48 | SpLPCQGroup_6-UP
49 |
--------------------------------------------------------------------------------
/mk-configs/pcq_types.rsc:
--------------------------------------------------------------------------------
1 | # apr/05/2018 14:20:47 by RouterOS 6.41.3
2 | # software id =
3 | #
4 | #
5 | #
6 | /queue type
7 | add kind=pcq name=SpLPCQGroup_1-DOWN pcq-classifier=dst-address pcq-rate=\
8 | 2048k
9 | add kind=pcq name=SpLPCQGroup_1-UP pcq-classifier=src-address pcq-rate=512k
10 | add kind=pcq name=SpLPCQGroup_2-DOWN pcq-classifier=dst-address pcq-rate=5M
11 | add kind=pcq name=SpLPCQGroup_2-UP pcq-classifier=src-address pcq-rate=1024k
12 | add kind=pcq name=SpLPCQGroup_3-DOWN pcq-classifier=dst-address pcq-rate=\
13 | 10240k
14 | add kind=pcq name=SpLPCQGroup_3-UP pcq-classifier=src-address pcq-rate=2048k
15 | add kind=pcq name=SpLPCQGroup_4-DOWN pcq-classifier=dst-address pcq-rate=\
16 | 5192k
17 | add kind=pcq name=SpLPCQGroup_4-UP pcq-classifier=src-address pcq-rate=2048k
18 | add kind=pcq name=SpLPCQGroup_5-DOWN pcq-classifier=dst-address pcq-rate=\
19 | 10240k
20 | add kind=pcq name=SpLPCQGroup_5-UP pcq-classifier=src-address pcq-rate=2048k
21 | add kind=pcq name=SpLPCQGroup_6-DOWN pcq-classifier=dst-address pcq-rate=\
22 | 2048k
23 | add kind=pcq name=SpLPCQGroup_6-UP pcq-classifier=src-address pcq-rate=2048k
24 |
--------------------------------------------------------------------------------
/mk-configs/simple.rsc:
--------------------------------------------------------------------------------
1 | # apr/05/2018 14:16:57 by RouterOS 6.41.3
2 | # software id =
3 | #
4 | #
5 | #
6 | /queue simple
7 | add burst-time=10s/10s comment=10001 limit-at=51k/205k max-limit=512k/2048k \
8 | name=SpLSQ_1-1 priority=5/5 queue=pcq-download-default/pcq-upload-default \
9 | target=192.168.100.2/32
10 | add burst-time=10s/10s comment=20002 limit-at=51k/205k max-limit=512k/2048k \
11 | name=SpLSQ_2-2 priority=5/5 queue=pcq-download-default/pcq-upload-default \
12 | target=192.168.100.3/32
13 | add burst-time=10s/10s comment=30003 limit-at=51k/205k max-limit=512k/2048k \
14 | name=SpLSQ_3-3 priority=5/5 queue=pcq-download-default/pcq-upload-default \
15 | target=192.168.100.4/32
16 | add burst-time=10s/10s comment=10004 limit-at=51k/205k max-limit=512k/2048k \
17 | name=SpLSQ_4-4 priority=5/5 queue=pcq-download-default/pcq-upload-default \
18 | target=192.168.100.5/32
19 | add burst-time=10s/10s comment=20005 limit-at=51k/205k max-limit=512k/2048k \
20 | name=SpLSQ_5-5 priority=5/5 queue=pcq-download-default/pcq-upload-default \
21 | target=192.168.100.6/32
22 | add burst-time=10s/10s comment=30006 limit-at=102k/500k max-limit=1024k/5M \
23 | name=SpLSQ_6-6 priority=5/5 queue=pcq-download-default/pcq-upload-default \
24 | target=192.168.100.7/32
25 | add burst-time=10s/10s comment=10007 limit-at=102k/500k max-limit=1024k/5M \
26 | name=SpLSQ_7-7 priority=5/5 queue=pcq-download-default/pcq-upload-default \
27 | target=192.168.100.8/32
28 | add burst-time=10s/10s comment=20008 limit-at=102k/500k max-limit=1024k/5M \
29 | name=SpLSQ_8-8 priority=5/5 queue=pcq-download-default/pcq-upload-default \
30 | target=192.168.100.9/32
31 | add burst-time=10s/10s comment=30009 limit-at=102k/500k max-limit=1024k/5M \
32 | name=SpLSQ_9-9 priority=5/5 queue=pcq-download-default/pcq-upload-default \
33 | target=192.168.100.10/32
34 | add burst-time=10s/10s comment=40010 limit-at=102k/500k max-limit=1024k/5M \
35 | name=SpLSQ_10-10 priority=5/5 queue=\
36 | pcq-download-default/pcq-upload-default target=192.168.100.11/32
37 | add burst-time=10s/10s comment=40011 limit-at=205k/1024k max-limit=\
38 | 2048k/10240k name=SpLSQ_11-11 priority=5/5 queue=\
39 | pcq-download-default/pcq-upload-default target=192.168.100.12/32
40 | add burst-time=10s/10s comment=50012 limit-at=205k/519k max-limit=2048k/5192k \
41 | name=SpLSQ_12-12 priority=1/1 queue=\
42 | pcq-download-default/pcq-upload-default target=192.168.100.13/32
43 | add burst-time=10s/10s comment=50013 limit-at=205k/519k max-limit=2048k/5192k \
44 | name=SpLSQ_13-13 priority=1/1 queue=\
45 | pcq-download-default/pcq-upload-default target=192.168.100.14/32
46 | add burst-time=10s/10s comment=50014 limit-at=205k/1024k max-limit=\
47 | 2048k/10240k name=SpLSQ_14-14 priority=1/1 queue=\
48 | pcq-download-default/pcq-upload-default target=192.168.100.17/32
49 | add burst-time=10s/10s comment=10015 limit-at=205k/205k max-limit=2048k/2048k \
50 | name=SpLSQ_15-15 priority=5/5 queue=\
51 | pcq-download-default/pcq-upload-default target=192.168.100.15/32
52 | add burst-time=10s/10s comment=10016 limit-at=205k/1024k max-limit=\
53 | 2048k/10240k name=SpLSQ_16-16 priority=1/1 queue=\
54 | pcq-download-default/pcq-upload-default target=192.168.100.16/32
55 | add burst-time=10s/10s comment=40017 limit-at=205k/1024k max-limit=\
56 | 2048k/10240k name=SpLSQ_17-17 priority=1/1 queue=\
57 | pcq-download-default/pcq-upload-default target=192.168.100.18/32
58 | add burst-time=10s/10s comment=30018 limit-at=205k/1024k max-limit=\
59 | 2048k/10240k name=SpLSQ_18-18 priority=1/1 queue=\
60 | pcq-download-default/pcq-upload-default target=192.168.100.19/32
61 | add burst-time=10s/10s comment=20019 limit-at=205k/519k max-limit=2048k/5192k \
62 | name=SpLSQ_19-19 priority=1/1 queue=\
63 | pcq-download-default/pcq-upload-default target=192.168.100.24/32
64 |
--------------------------------------------------------------------------------
/mk-configs/simple_parent.rsc:
--------------------------------------------------------------------------------
1 | # apr/16/2018 10:20:45 by RouterOS 6.41.3
2 | # software id =
3 | #
4 | #
5 | #
6 | /queue simple
7 | add burst-time=10s/10s comment="Tariff Main-Home Internet 2 Mbps" limit-at=\
8 | 51k/205k max-limit=512k/2048k name=SpLSTG_0-1 priority=5/5 queue=\
9 | pcq-download-default/pcq-upload-default target="192.168.100.2/32,192.168.1\
10 | 00.3/32,192.168.100.4/32,192.168.100.5/32,192.168.100.6/32"
11 | add burst-time=10s/10s comment="Tariff Main-Home Internet 5 Mbps" limit-at=\
12 | 102k/500k max-limit=1024k/5M name=SpLSTG_0-2 priority=5/5 queue=\
13 | pcq-download-default/pcq-upload-default target="192.168.100.7/32,192.168.1\
14 | 00.8/32,192.168.100.9/32,192.168.100.10/32,192.168.100.11/32"
15 | add burst-time=10s/10s comment="Tariff Main-Home Internet 10 Mbps" limit-at=\
16 | 205k/1024k max-limit=2048k/10240k name=SpLSTG_0-3 priority=5/5 queue=\
17 | pcq-download-default/pcq-upload-default target=192.168.100.12/32
18 | add burst-time=10s/10s comment="Tariff Main-Business 5 Mbps" limit-at=\
19 | 615k/1557k max-limit=6144k/15576k name=SpLSTG_0-4 priority=1/1 queue=\
20 | pcq-download-default/pcq-upload-default target=\
21 | 192.168.100.13/32,192.168.100.14/32,192.168.100.24/32
22 | add burst-time=10s/10s comment="Tariff Main-Business 10 Mbps" limit-at=\
23 | 820k/4096k max-limit=8192k/40960k name=SpLSTG_0-5 priority=1/1 queue=\
24 | pcq-download-default/pcq-upload-default target=\
25 | 192.168.100.17/32,192.168.100.16/32,192.168.100.18/32,192.168.100.19/32
26 | add burst-time=10s/10s comment="Tariff Main-Capped 2 Mbps, 3GB included" \
27 | limit-at=205k/205k max-limit=2048k/2048k name=SpLSTG_0-6 priority=5/5 \
28 | queue=pcq-download-default/pcq-upload-default target=192.168.100.15/32
29 | add burst-time=10s/10s comment=10001 limit-at=10200/41k max-limit=512k/2048k \
30 | name=SpLSTQ_1-1 parent=SpLSTG_0-1 priority=5/5 queue=\
31 | pcq-download-default/pcq-upload-default target=192.168.100.2/32
32 | add burst-time=10s/10s comment=20002 limit-at=10200/41k max-limit=512k/2048k \
33 | name=SpLSTQ_2-2 parent=SpLSTG_0-1 priority=5/5 queue=\
34 | pcq-download-default/pcq-upload-default target=192.168.100.3/32
35 | add burst-time=10s/10s comment=30003 limit-at=10200/41k max-limit=512k/2048k \
36 | name=SpLSTQ_3-3 parent=SpLSTG_0-1 priority=5/5 queue=\
37 | pcq-download-default/pcq-upload-default target=192.168.100.4/32
38 | add burst-time=10s/10s comment=10004 limit-at=10200/41k max-limit=512k/2048k \
39 | name=SpLSTQ_4-4 parent=SpLSTG_0-1 priority=5/5 queue=\
40 | pcq-download-default/pcq-upload-default target=192.168.100.5/32
41 | add burst-time=10s/10s comment=20005 limit-at=10200/41k max-limit=512k/2048k \
42 | name=SpLSTQ_5-5 parent=SpLSTG_0-1 priority=5/5 queue=\
43 | pcq-download-default/pcq-upload-default target=192.168.100.6/32
44 | add burst-time=10s/10s comment=30006 limit-at=20400/100k max-limit=1024k/5M \
45 | name=SpLSTQ_6-6 parent=SpLSTG_0-2 priority=5/5 queue=\
46 | pcq-download-default/pcq-upload-default target=192.168.100.7/32
47 | add burst-time=10s/10s comment=10007 limit-at=20400/100k max-limit=1024k/5M \
48 | name=SpLSTQ_7-7 parent=SpLSTG_0-2 priority=5/5 queue=\
49 | pcq-download-default/pcq-upload-default target=192.168.100.8/32
50 | add burst-time=10s/10s comment=20008 limit-at=20400/100k max-limit=1024k/5M \
51 | name=SpLSTQ_8-8 parent=SpLSTG_0-2 priority=5/5 queue=\
52 | pcq-download-default/pcq-upload-default target=192.168.100.9/32
53 | add burst-time=10s/10s comment=30009 limit-at=20400/100k max-limit=1024k/5M \
54 | name=SpLSTQ_9-9 parent=SpLSTG_0-2 priority=5/5 queue=\
55 | pcq-download-default/pcq-upload-default target=192.168.100.10/32
56 | add burst-time=10s/10s comment=40010 limit-at=20400/100k max-limit=1024k/5M \
57 | name=SpLSTQ_10-10 parent=SpLSTG_0-2 priority=5/5 queue=\
58 | pcq-download-default/pcq-upload-default target=192.168.100.11/32
59 | add burst-time=10s/10s comment=40011 limit-at=205k/1024k max-limit=\
60 | 2048k/10240k name=SpLSTQ_11-11 parent=SpLSTG_0-3 priority=5/5 queue=\
61 | pcq-download-default/pcq-upload-default target=192.168.100.12/32
62 | add burst-time=10s/10s comment=50012 limit-at=205k/519k max-limit=2048k/5192k \
63 | name=SpLSTQ_12-12 parent=SpLSTG_0-4 priority=1/1 queue=\
64 | pcq-download-default/pcq-upload-default target=192.168.100.13/32
65 | add burst-time=10s/10s comment=50013 limit-at=205k/519k max-limit=2048k/5192k \
66 | name=SpLSTQ_13-13 parent=SpLSTG_0-4 priority=1/1 queue=\
67 | pcq-download-default/pcq-upload-default target=192.168.100.14/32
68 | add burst-time=10s/10s comment=50014 limit-at=205k/1024k max-limit=\
69 | 2048k/10240k name=SpLSTQ_14-14 parent=SpLSTG_0-5 priority=1/1 queue=\
70 | pcq-download-default/pcq-upload-default target=192.168.100.17/32
71 | add burst-time=10s/10s comment=10015 limit-at=205k/205k max-limit=2048k/2048k \
72 | name=SpLSTQ_15-15 parent=SpLSTG_0-6 priority=5/5 queue=\
73 | pcq-download-default/pcq-upload-default target=192.168.100.15/32
74 | add burst-time=10s/10s comment=10016 limit-at=205k/1024k max-limit=\
75 | 2048k/10240k name=SpLSTQ_16-16 parent=SpLSTG_0-5 priority=1/1 queue=\
76 | pcq-download-default/pcq-upload-default target=192.168.100.16/32
77 | add burst-time=10s/10s comment=40017 limit-at=205k/1024k max-limit=\
78 | 2048k/10240k name=SpLSTQ_17-17 parent=SpLSTG_0-5 priority=1/1 queue=\
79 | pcq-download-default/pcq-upload-default target=192.168.100.18/32
80 | add burst-time=10s/10s comment=30018 limit-at=205k/1024k max-limit=\
81 | 2048k/10240k name=SpLSTQ_18-18 parent=SpLSTG_0-5 priority=1/1 queue=\
82 | pcq-download-default/pcq-upload-default target=192.168.100.19/32
83 | add burst-time=10s/10s comment=20019 limit-at=205k/519k max-limit=2048k/5192k \
84 | name=SpLSTQ_19-19 parent=SpLSTG_0-4 priority=1/1 queue=\
85 | pcq-download-default/pcq-upload-default target=192.168.100.24/32
86 |
--------------------------------------------------------------------------------
/spl-scripts/net_stats.py:
--------------------------------------------------------------------------------
1 | #!/usr/local/bin/python
2 | # Script connects to the file that was created by splynx_stats.py and gets usage for selected period
3 | # Run the script this way: net_stats.py <start_date> <end_date> - for example net_stats.py 2021-01-01 2021-02-01; it will show all usage per /24 range for the selected period
4 | import splynx_stats as s
5 | from sys import argv
6 | import time
7 | from tabulate import tabulate
8 | import json
9 | from datetime import date
10 |
11 | start_time = time.time()
12 |
13 | start_date = str(argv[1])
14 | end_date = str(argv[2])
15 | isp_usage = s.splynx_stats()
16 |
17 | result = isp_usage.get_info(start_date, end_date)
18 |
19 | #print(json.dumps(result,indent=2))
20 | print(tabulate(result, headers='keys', tablefmt="grid"))
21 |
22 |
23 | print("--- %s seconds ---" % (time.time() - start_time))
24 |
25 |
--------------------------------------------------------------------------------
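net_stats.py assumes that ./data/all_stats.json already exists. A sketch of the full two-step workflow (API credentials must be set in splynx_stats.py first):

```python
import splynx_stats as s

stats = s.splynx_stats()
stats.all_stats_to_file()                           # step 1: pull all customer sessions into ./data/all_stats.json
print(stats.get_info('2021-01-01', '2021-02-01'))   # step 2: aggregate per /24 for the selected period
```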
/spl-scripts/ppp-mikrotik.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # This is the tool for Backing up PPP customers from Splynx DB to Mikrotik. Tool grabs all Splynx customers and creates PPP secrets + Profiles for Mikrotik router
4 | import splynx_api
5 | import json
6 | import codecs
7 |
8 | # set API variables
9 |
10 | api_url = '' # please set your Splynx URL
11 | key = "" # please set your key
12 | secret = "" # please set your secret
13 |
14 |
15 | # Let's get all customers IDs from Splynx
16 | ApiUrlCust = "admin/customers/customer" #API customers URL
17 | api = splynx_api.SplynxApi(api_url, key, secret)
18 | api.api_call_get(ApiUrlCust)
19 |
20 | json_object = api.response
21 |
22 | # OK, customers are in JSON object now, we need to get all their IDs :
23 |
24 | cust_id_list = []
25 | for cust in json_object:
26 | cust_id_list.append(cust['id'])
27 |
28 |
29 | # Got IDs of all customers from DB, need to grab all active Internet services from Splynx
30 |
31 | ppp_array = []
32 | for i in range(8):
33 | ppp_array.append([])
34 |
35 | for id in cust_id_list:
36 | ApiServ = "admin/customers/customer/" + id + "/internet-services"
37 | api.api_call_get(ApiServ)
38 | json_service = api.response
39 |
40 | for cust_serv in json_service:
41 | if cust_serv['status'] == 'active':
42 | ppp_array[0].append(cust_serv['login'])
43 | ppp_array[1].append(cust_serv['password'])
44 | ppp_array[2].append(cust_serv['ipv4'])
45 | ppp_array[3].append(cust_serv['tariff_id'])
46 | ppp_array[4].append(0)
47 | ppp_array[5].append(0)
48 | ppp_array[6].append(cust_serv['customer_id'])
49 | ppp_array[7].append(0)
50 | print("Got service for customer ID " + cust_serv['customer_id'])
51 |
52 |
53 | # Get the speeds from tariff table Splynx and store in list "plan"
54 |
55 | ApiPlan = "admin/tariffs/internet"
56 | api.api_call_get(ApiPlan)
57 | json_plan = api.response
58 |
59 |
60 | count = 0
61 | for x in ppp_array[3]:
62 |
63 | for plan in json_plan:
64 | if x==plan['id']:
65 | # print(ppp_array[4][count] + " Download: " + plan['speed_download'] + ", Upload: " + plan['speed_upload'])
66 | ppp_array[4][count] = plan['speed_download']
67 | ppp_array[5][count] = plan['speed_upload']
68 |
69 | count = count + 1
70 |
71 |
72 |
73 | # Add Customer name to array + Customer password if service password is empty
74 |
75 | customer = []
76 | for i in range(3):
77 | customer.append([])
78 |
79 | for id in cust_id_list:
80 | ApiServ = "admin/customers/customer/" + id
81 | api.api_call_get(ApiServ)
82 | json_customer = api.response
83 | customer[0].append(json_customer['id'])
84 | customer[1].append(json_customer['password'])
85 | customer[2].append(json_customer['name'])
86 | print("Got customer, ID " + json_customer['id'])
87 |
88 | for i in range(len(ppp_array[6])):
89 | for x in range(len(customer[0])):
90 | if customer[0][x] == ppp_array[6][i]:
91 | ppp_array[7][i] = customer[2][x]
92 |
93 | if customer[0][x] == ppp_array[6][i] and len(ppp_array[1][i]) == 0 :
94 | ppp_array[1][i] = customer[1][x]
95 |
96 |
97 | # Create file PPP Secrets for Mikrotik router
98 |
99 | f_secrets = codecs.open('ppp_secret.rsc', 'w', encoding='utf-8')
100 | f_profile = codecs.open('ppp_profile.rsc', 'w', encoding='utf-8')
101 |
102 | for i in range(len(ppp_array[0])):
103 | if len(ppp_array[2][i]) != 0:
104 |         f_secrets.write ('/ppp secret add name="' + ppp_array[0][i] + '" password=' + ppp_array[1][i] + " remote-address=" + ppp_array[2][i] + " profile=Splynx_" + ppp_array[3][i] + ' comment="' + ppp_array[7][i] + '"\n')
105 |     else:
106 |         f_secrets.write ('/ppp secret add name="' + ppp_array[0][i] + '" password=' + ppp_array[1][i] + " profile=Splynx_" + ppp_array[3][i] + ' comment="' + ppp_array[7][i] + '"\n')
107 | f_secrets.close()
108 |
109 |
110 | # Create file PPP Profile for Mikrotik router
111 |
112 | for plan in json_plan:
113 | f_profile.write ("/ppp profile add local-address=10.0.0.1 name=Splynx_" + plan['id'] + " rate-limit=" + plan['speed_download'] + "k/" + plan['speed_upload'] +"k \n")
114 | f_profile.close()
115 |
116 |
117 |
--------------------------------------------------------------------------------
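A sketch of the secret line format the script writes to ppp_secret.rsc, with invented customer values:

```python
service = {'login': 'jdoe', 'password': 's3cret', 'ipv4': '192.168.100.2',
           'tariff_id': '1', 'name': 'John Doe'}
line = ('/ppp secret add name="%(login)s" password=%(password)s '
        'remote-address=%(ipv4)s profile=Splynx_%(tariff_id)s '
        'comment="%(name)s"' % service)
print(line)
```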
/spl-scripts/splynx_api.py:
--------------------------------------------------------------------------------
1 | #!/usr/local/bin/python3.7
2 | import hashlib
3 | import hmac
4 | from datetime import datetime
5 | import time
6 | import math
7 | import requests
8 | from requests import request
9 | from urllib.parse import urlencode
10 | import sys
11 | import logging
12 |
13 | """
14 | *Splynx API v. 1.0
15 | *REST API Class
16 | *Author: Narcisse Doudieu Siewe
17 | """
18 |
19 | try:
20 | import http.client as http_client
21 | except ImportError:
22 | # Python 2
23 | import httplib as http_client
24 | http_client.HTTPConnection.debuglevel = 1
25 |
26 | # You must initialize logging, otherwise you'll not see debug output.
27 | logging.basicConfig()
28 | logging.getLogger().setLevel(logging.DEBUG)
29 | requests_log = logging.getLogger("requests.packages.urllib3")
30 | requests_log.setLevel(logging.DEBUG)
31 | requests_log.propagate = True
32 |
33 | class SplynxApi:
34 | def __init__(self, url, api_key, api_secret):
35 | self._url = url
36 | self._api_key = api_key
37 | self._api_secret = api_secret
38 | self._sash = None
39 | self._nonce_v = None
40 | self._debug = None
41 | self._version = '2.0'
42 | self._administrator_id = None
43 | self._administrator_role = None
44 | self._administrator_partner = None
45 | self._test_method = {'POST':201, 'PUT':202, 'DELETE':204}
46 | self._nonce()
47 |
48 | @property
49 | def administrator_id(self):
50 | return self._administrator_id
51 |
52 | @administrator_id.setter
53 | def administrator_id(self, value):
54 | self._administrator_id = value
55 |
56 | @property
57 | def administrator_role(self):
58 | return self._administrator_role
59 |
60 | @administrator_role.setter
61 | def administrator_role(self,value):
62 | self._administrator_role = value
63 |
64 | @property
65 | def administrator_partner(self):
66 | return self._administrator_partner
67 |
68 | @administrator_partner.setter
69 | def administrator_partner(self,value):
70 | self._administrator_partner = value
71 |
72 | @property
73 | def debug(self):
74 | return self._debug
75 |
76 | @debug.setter
77 | def debug(self,value):
78 | self._debug = value
79 |
80 | @property
81 | def result(self):
82 | return self._result
83 |
84 | @result.setter
85 | def result(self,value):
86 | self._result = value
87 |
88 | @property
89 | def response(self):
90 | return self._response
91 |
92 | @response.setter
93 | def response(self,value):
94 | self._response = value
95 |
96 | @property
97 | def response_code(self):
98 | return self._response_code
99 |
100 | @response_code.setter
101 | def response_code(self,value):
102 | self._response_code = value
103 |
104 | @property
105 | def sash(self):
106 | return self._sash
107 |
108 | @sash.setter
109 | def sash(self,value):
110 | self._sash = value
111 |
112 | """
113 | *Create signature for API call validation
114 | *@return string hash
115 | """
116 | def _signature(self):
117 | if sys.version_info >= (3,0) :
118 | st = "%s%s"%(self._nonce_v, self._api_key)
119 | hsh = hmac.new(bytes(self._api_secret,'latin-1'), bytes(st,'latin-1'), hashlib.sha256).hexdigest()
120 | else :
121 | st = "%s%s"%(self._nonce_v, self._api_key)
122 | hsh = hmac.new(self._api_secret, st, hashlib.sha256).hexdigest()
123 | return hsh.upper()
124 |
125 | """
126 | *Set nonce as timestamp
127 | """
128 | def _nonce(self):
129 | t_now = datetime.now()
130 | self._nonce_v = round((time.mktime(t_now.timetuple()) + t_now.microsecond/1000000.0)*100)
131 | if self.debug:
132 | print("nonce", self._nonce_v)
133 |
134 | """
135 | *Send request to Splynx API
136 | *@param method: get, delete, put, post
137 | *@param url
138 | *@param param
139 | *@return array JSON results
140 | """
141 | def _request_process(self, method, url, param = []):
142 | if self.debug:
143 | print("%s to %s\n"%(method, url))
144 | print(param, "\n")
145 | auth_str = self._make_auth()
146 | # print(auth_str)
147 | headers = {
148 | "Authorization":"Splynx-EA ("+auth_str+")",
149 | "user-agent":"Splynx Python API"+self._version
150 | }
151 | try:
152 | if method in ('POST','PUT'):
153 | rs = request(method, url, headers = headers, json = param)
154 | elif method in ('GET','DELETE'):
155 | rs = request(method, url, headers=headers)
156 | else:
157 | rs = requests.get(url, headers=headers, params=param)
158 | except requests.RequestException as e:
159 | if self.debug:
160 | print("response: ", e.response, '\n')
161 | print("request: ", e.request, '\n')
162 | return
163 | self.administrator_id = rs.headers.get('SpL-Administrator-Id') or self.administrator_id
164 | self.administrator_role = rs.headers.get('SpL-Administrator-Role') or self.administrator_role
165 | self._administrator_partner = rs.headers.get('SpL-Administrator-Partner') or self._administrator_partner
166 | self.response_code = rs.status_code
167 | if self.debug:
168 | print('Content: ',rs.content, '\n')
169 | self.result = False
170 | if self.response_code == 200 or self._test_method.get(method) == self.response_code:
171 | self.result = True
172 | self.response = rs.json()
173 | if self.debug:
174 | print("status code: %s method: %s\n"%(self.response_code, method))
175 | return self.response
176 |
177 | """
178 | *Make Splynx Extended Authorization string
179 | *
180 | *@return string of Splynx EA
181 | """
182 | def _make_auth(self):
183 | self._nonce_v += 1
184 | auth = {'key':self._api_key,
185 | 'signature':self._signature(),
186 | 'nonce':self._nonce_v}
187 | if self.sash != None:
188 | auth['sash'] = self.sash
189 |         return urlencode(auth)
190 |
191 | def _getUrl(self, pth, Id=None):
192 | url = self._url+'api/'+self._version +'/'+pth
193 | if Id:
194 | url += '/'+str(Id)
195 | return url
196 |
197 | """
198 | *Send API call GET to Splynx API
199 | *@param string path
200 | *@param integer Id
201 | *@param a list
202 | """
203 | def api_call_get(self, path, Id=None):
204 | return self._request_process('GET', self._getUrl(path, Id))
205 |
206 |
207 | def api_call_get_filter(self, path, param):
208 | return self._request_process('GET_FILTER', self._getUrl(path), param)
209 |
210 | """
211 | *Send API call DELETE to Splynx API
212 | *@param string path
213 | *@param integer Id
214 | *@return a list
215 | """
216 | def api_call_delete(self,path, Id=None):
217 | return self._request_process('DELETE', self._getUrl(path, Id))
218 |
219 | """
220 | *Send API call POST to Splynx API
221 | *@param string path
222 | *@param list param
223 | *@return a list
224 | """
225 | def api_call_post(self, path, param):
226 | return self._request_process('POST', self._getUrl(path), param)
227 |
228 | """
229 | *Send API call PUT to Splynx API
230 | *@param string path
231 | *@param integer Id
232 | *@param list param
233 | *@return a list
234 | """
235 | def api_call_put(self, path, Id, param):
236 | return self._request_process('PUT', self._getUrl(path, Id), param)
237 |
--------------------------------------------------------------------------------
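How the Splynx-EA signature from _signature() is computed, as a standalone sketch with dummy credentials:

```python
import hashlib
import hmac
import time

api_key, api_secret = 'demo-key', 'demo-secret'   # dummy values
nonce = round(time.time() * 100)                  # same scale as _nonce()
msg = "%s%s" % (nonce, api_key)
signature = hmac.new(api_secret.encode('latin-1'), msg.encode('latin-1'),
                     hashlib.sha256).hexdigest().upper()
print(signature)  # sent as: Authorization: Splynx-EA (key=...&signature=...&nonce=...)
```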
/spl-scripts/splynx_stats.py:
--------------------------------------------------------------------------------
1 | # Run this file to connect to Splynx and save usage of all active customers to the file in /data/ folder
2 |
3 | import splynx_api
4 | import json
5 | import os
6 | from tabulate import tabulate
7 |
8 | api_url = '' # please set your Splynx URL
9 | key = "" # please set your key
10 | secret = "" # please set your secret
11 |
12 | api = splynx_api.SplynxApi(api_url, key, secret)
13 |
14 | class splynx_stats:
15 | def all_stats_to_file(self):
16 |
17 | ApiGetCustomers = "admin/customers/customer"
18 |
19 | api.api_call_get(ApiGetCustomers)
20 | customers_json = api.response
21 | a_customer_list = []
22 |
23 | for customer in customers_json:
24 | if customer['status'] == 'active' or customer['status'] == 'blocked':
25 | a_customer_list.append(customer['id'])
26 |
27 | directory = './data/'
28 | filename = str('all_stats.json')
29 | file_path = os.path.join(directory, filename)
30 | if not os.path.isdir(directory):
31 | os.mkdir(directory)
32 |         # Collect all sessions first, then write the file once, so the
33 |         # result is valid JSON that get_info() can load with json.loads()
34 |         all_sessions = []
35 |         for id in a_customer_list:
36 |             ApiGetStats = "admin/customers/customer-statistics/" + id
37 |             api.api_call_get(ApiGetStats)
38 |             stats_json = api.response
39 |             for session in stats_json:
40 |                 data = {"session id": session['session_id'], "date": session['end_date'], "ip": session['ipv4'],
41 |                         "download": session['in_bytes'], "upload": session['out_bytes']}
42 |                 all_sessions.append(data)
43 |
44 |         with open(file_path, 'w') as f:
45 |             json.dump(all_sessions, f, indent=2)
46 |
47 |
48 |
49 | def find_net(self, ip_address):
50 | ipaddress=ip_address
51 | ip = ip_address.split(".")
52 | net = [ip[0], ip[1], ip[2], "0"]
53 | net_address = ".".join(net)
54 | return (net_address)
55 |
56 | def get_info(self, start_date, end_date) :
57 | directory = './data/'
58 | filename = str('all_stats.json')
59 | file_path = os.path.join(directory, filename)
60 | with open(file_path, 'r') as f:
61 | my_json = f.read()
62 | all_stats = json.loads(my_json)
63 |
64 | net_stats=[]
65 | net_list=[]
66 | for session in all_stats :
67 | if start_date < session["date"] and end_date > session["date"]:
68 | session["ip"] = self.find_net(session["ip"])
69 | net_stats.append(session)
70 | net_list.append(session["ip"])
71 |
72 | networks = (list(set(net_list))) # this is the list of /24 networks
73 | networks.sort()
74 |
75 | final_stats=[]
76 | for net in networks :
77 | n_data = {"ip network" : net, "download" : 0, "upload" : 0}
78 | final_stats.append(n_data)
79 |
80 | for f in final_stats :
81 | for n in net_stats:
82 | if f["ip network"] == n["ip"] :
83 | f["download"] = f["download"] + int(n["download"])
84 | f["upload"] = f["upload"] + int(n["upload"])
85 |
86 | view_stats = []
87 | for line in final_stats :
88 | data = {"IP network : " : (line["ip network"]+"/24"), "Download TB" : round((line["download"]/1000000000000),2), "Upload TB" : round((line["upload"]/1000000000000),2)}
89 | view_stats.append(data)
90 |
91 | return(view_stats)
92 |
93 |
94 | # print(self.find_net(session["ip_address"]))
95 |
96 | # print(self.find_net(all_stats[0]["ip"]))
97 | # print(self.find_net(all_stats[-1]["ip"]))
98 |
99 |
100 | if __name__ == "__main__" :
101 | new_db = splynx_stats()
102 | new_db.all_stats_to_file()
103 |
--------------------------------------------------------------------------------
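Units note: get_info() reports decimal terabytes (bytes / 10^12). A one-line worked example:

```python
total_bytes = 3450000000000
print(round(total_bytes / 1000000000000, 2), "TB")  # 3.45 TB
```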