├── .gitignore ├── docs ├── images │ ├── ripencc-atlas-bq-0.png │ ├── ripencc-atlas-bq-1.png │ ├── ripencc-atlas-bq-2.png │ ├── ripencc-atlas-bq-3.png │ ├── ripencc-atlas-bq-4.png │ ├── ripencc-atlas-bq-5.png │ └── ripencc-atlas-bq-6.png ├── tuts.md ├── intro.md ├── tutorial_subclauses.md ├── measurements_sslcert.md ├── measurements_ping.md ├── measurements_http.md ├── gettingstarted.md ├── basics.md ├── measurements_ntp.md ├── measurements_traceroute.md ├── tutorial_min_pings.md ├── tutorial_cost_efficiency.md └── measurements_dns.md ├── tests ├── package.json ├── README.md ├── fixtures │ ├── toDo.json │ └── withErrors.json ├── fixtures.test.js └── dns_parser.test.js ├── .github └── workflows │ └── actions.yml ├── README.md └── scripts └── dns_parser.js /.gitignore: -------------------------------------------------------------------------------- 1 | tests/node_modules 2 | -------------------------------------------------------------------------------- /docs/images/ripencc-atlas-bq-0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RIPE-NCC/ripe-atlas-bigquery/HEAD/docs/images/ripencc-atlas-bq-0.png -------------------------------------------------------------------------------- /docs/images/ripencc-atlas-bq-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RIPE-NCC/ripe-atlas-bigquery/HEAD/docs/images/ripencc-atlas-bq-1.png -------------------------------------------------------------------------------- /docs/images/ripencc-atlas-bq-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RIPE-NCC/ripe-atlas-bigquery/HEAD/docs/images/ripencc-atlas-bq-2.png -------------------------------------------------------------------------------- /docs/images/ripencc-atlas-bq-3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/RIPE-NCC/ripe-atlas-bigquery/HEAD/docs/images/ripencc-atlas-bq-3.png -------------------------------------------------------------------------------- /docs/images/ripencc-atlas-bq-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RIPE-NCC/ripe-atlas-bigquery/HEAD/docs/images/ripencc-atlas-bq-4.png -------------------------------------------------------------------------------- /docs/images/ripencc-atlas-bq-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RIPE-NCC/ripe-atlas-bigquery/HEAD/docs/images/ripencc-atlas-bq-5.png -------------------------------------------------------------------------------- /docs/images/ripencc-atlas-bq-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RIPE-NCC/ripe-atlas-bigquery/HEAD/docs/images/ripencc-atlas-bq-6.png -------------------------------------------------------------------------------- /tests/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "scripts": { 3 | "test": "jest dns_parser.test.js", 4 | "fixtures": "jest fixtures.test.js" 5 | }, 6 | "devDependencies": { 7 | "jest": "^27.0.6" 8 | }, 9 | "dependencies": {} 10 | } 11 | -------------------------------------------------------------------------------- /.github/workflows/actions.yml: -------------------------------------------------------------------------------- 1 | name: testing-dns-parser 2 | on: [push] 3 | jobs: 4 | test: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v2 8 | - uses: actions/setup-node@v1 9 | - run: npm install && npm run test 10 | working-directory: tests 11 | -------------------------------------------------------------------------------- /docs/tuts.md: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | * min time to any target 4 | * timeseries of packet loss 5 | * all IPs discovered 6 | * filtering rfc1918 7 | 8 | - building up queries 9 | 10 | 11 | 12 | In these examples I'll use the `samples` dataset. The same queries are valid 13 | against the `measurements` dataset, but these can be copied without incurring 14 | much cost. 15 | 16 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # How to test the dns_parser 2 | 3 | ## Simple test 4 | 5 | Run `npm install && npm run test` to run the current set of tests. These tests compare DNS payloads with the expected (parsed) output. Tests pickup the test cases from `fixtures/fixtures.json`. 6 | 7 | ## Adding functionality 8 | 9 | If adding new functionality to the parser, then you might need to add new test cases. You can re-run the parser over the existing DNS payloads with `npm run fixtures `. Once you're happy with your changes you can override the existing tests on `fixtures/fixtures.json`. Thanks for contributing! 
-------------------------------------------------------------------------------- /tests/fixtures/toDo.json: -------------------------------------------------------------------------------- 1 | { 2 | "qaCEAABAAEAAAAACGhvc3RuYW1lBGJpbmQAABAAA8AMABAAAwAAAAAACglubm4xLWxheDU=": { 3 | "id": 43424, 4 | "flags": { 5 | "query": false, 6 | "opcode": null, 7 | "authoritative_answer": true, 8 | "truncated": false, 9 | "recursion_desired": false, 10 | "recursion_available": false, 11 | "z": 0, 12 | "authentic_data": false, 13 | "checking_disabled": false, 14 | "rcode": "NoError" 15 | }, 16 | "qdcount": 64, 17 | "ancount": 64, 18 | "aucount": 0, 19 | "adcount": 2, 20 | "payload": [ 21 | { 22 | "error": "parse error in question section" 23 | } 24 | ] 25 | } 26 | } -------------------------------------------------------------------------------- /tests/fixtures.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | Utility that generates new fixtures based on the current dns_parser. 3 | Useful when adding new features to the parser. 4 | 5 | It's based on the jest testing framework, and it can be invoked through: 6 | `npm run fixtures ` 7 | ...where filename defaults to 'fixtures.json' 8 | */ 9 | 10 | const fs = require('fs'); 11 | const parse_wire_message = require('../scripts/dns_parser.js'); 12 | const tests = require('./fixtures/fixtures.json'); 13 | 14 | let [_, filename] = process.argv.slice(2); 15 | filename = filename ? filename : 'fixtures.json' 16 | 17 | let entries = {} 18 | Object.entries(tests).map( 19 | ([wireMessage, knownOutput]) => { 20 | entries[wireMessage] = parse_wire_message(wireMessage) 21 | } 22 | ); 23 | 24 | fs.writeFile( 25 | filename, 26 | JSON.stringify( 27 | entries, 28 | 0, 2 // we're pretty-printing 29 | ), 30 | 'utf-8', 31 | () => {} // empty promise 32 | ) 33 | 34 | // we need to have at least 1 test in the suite 35 | // let's leave an empty test here... 
36 | test('empty test', () => { 37 | expect(1).toStrictEqual(1) 38 | }) 39 | 40 | -------------------------------------------------------------------------------- /tests/dns_parser.test.js: -------------------------------------------------------------------------------- 1 | const parse_wire_message = require('../scripts/dns_parser.js'); 2 | const tests = require('./fixtures/fixtures.json'); 3 | const withErrors = require('./fixtures/withErrors.json'); 4 | 5 | // one test case per wireMessage 6 | Object.entries(tests).map( 7 | ([wireMessage, knownOutput]) => { 8 | test(`Testing parser output ${wireMessage.slice(0,8)}`, () => { 9 | 10 | expect( 11 | parse_wire_message(wireMessage) 12 | ).toStrictEqual( // .toBe() does a shallow comparison and fails... 13 | knownOutput 14 | ) 15 | }); 16 | } 17 | ); 18 | 19 | // the parser manages to work, although with errors 20 | Object.entries(withErrors).map( 21 | ([wireMessage, knownOutput]) => { 22 | 23 | const id = `${wireMessage.slice(0, 8)}`; // just a short id 24 | test(`Testing parser output ${id}`, () => { 25 | 26 | expect( 27 | parse_wire_message(wireMessage) 28 | ).toStrictEqual( // .toBe() does a shallow comparison and fails... 29 | knownOutput 30 | ) 31 | }); 32 | } 33 | ) 34 | -------------------------------------------------------------------------------- /docs/intro.md: -------------------------------------------------------------------------------- 1 | # RIPE Atlas in BigQuery 2 | 3 | Google BigQuery is a data warehousing platform with an SQL interface on top to allow fast query access to data. 4 | 5 | The RIPE Atlas network measurement platform conducts network measurements, and 6 | makes them available via the RIPE Atlas API. The API provides some 7 | opportunities to filter data, but little in terms of compute. 8 | 9 | To bridge this gap, we are now storing RIPE Atlas data in Google BigQuery. 10 | 11 | That offers a ridiculous advantage: using BigQuery, we can slice the data along any dimension we care about. 
12 | 13 | We're going to start with the following offering: 14 | 15 | (list of tables) 16 | (list of time ranges) 17 | 18 | These documents are intended to bootstrap folks into querying this data, but 19 | they're definitely not everything you can do. We're hoping, after some time 20 | using this data, to learn more about how best to structure it, what tables 21 | would be useful for us to generate on your behalf, and how you're using it all. 22 | 23 | 24 | ## Datasets 25 | 26 | We're making the following datasets available on Google BigQuery: 27 | 28 | * [Ping](ping.md) 29 | * [Traceroute]() 30 | * [DNS]() 31 | * [HTTP]() 32 | * [SSL]() 33 | * [NTP]() 34 | 35 | -------------------------------------------------------------------------------- /docs/tutorial_subclauses.md: -------------------------------------------------------------------------------- 1 | ## Subqueries 2 | 3 | It can be tempting to write queries as compact and as concisely as possible, 4 | but often this is not optimal in terms of iterative development or code 5 | readability. 6 | 7 | A common approach when building queries is to construct a series of subqueries 8 | that form a sort of a pipeline: this has the advantage that each individual 9 | step can be self-contained, tested and verified separately, and becomes much 10 | simpler to reason about. 11 | 12 | Let's take a trivial example. In the [traceroute](measurements_traceroute.md) 13 | tables, IP addresses appear in multiple places. If we want to process all, 14 | having them in separate columns may not be ideal, but a simple select won't cut it: 15 | 16 | ```sql 17 | select src_addr, synth_addr, dst_addr, hop_addr 18 | from `ripencc-atlas`.samples.traceroute, unnest(hops) 19 | ``` 20 | 21 | This will obviously return two columns. If we want to simply retrieve a list of 22 | IP addresses, one of the easiest ways is to deconstruct the select into 23 | multiple subqueries. 
24 | 25 | For example: 26 | 27 | ```sql 28 | with src_ips as 29 | ( 30 | select src_addr as ip 31 | from `ripencc-atlas`.samples.traceroute 32 | where date(start_time) = "2020-10-01" 33 | ), 34 | 35 | dst_ips as 36 | ( 37 | select dst_addr as ip 38 | from `ripencc-atlas`.samples.traceroute 39 | where date(start_time) = "2020-10-01" 40 | ), 41 | 42 | synth_ips as 43 | ( 44 | select synth_addr as ip 45 | from `ripencc-atlas`.samples.traceroute 46 | where date(start_time) = "2020-10-01" 47 | ), 48 | 49 | hop_ips as 50 | ( 51 | select hop_addr as ip 52 | from `ripencc-atlas`.samples.traceroute, unnest(hops) 53 | where date(start_time) = "2020-10-01" 54 | ), 55 | 56 | combined_ips as 57 | ( 58 | select * from src_ips 59 | union all 60 | select * from dst_ips 61 | union all 62 | select * from synth_ips 63 | union all 64 | select * from hop_ips 65 | ) 66 | 67 | select distinct ip 68 | from combined_ips 69 | ``` 70 | 71 | This ultimately splits out the IP addresses from each of the columns and 72 | renames them all as `ip`, before performing a union over all of the resulting 73 | tables from the subqueries, then finally running `distinct` over them at the 74 | end to create a list of unique IPs observed in this table. 75 | 76 | ## Filtering tables prior to processing 77 | 78 | Subqueries come into their own when you have multiple tables to work with and 79 | you want to filter data prior to attempting to do additional computation. 80 | 81 | Let's say we want to compare probes where we have DNS responses and ping RTTs 82 | to the same resolver. Conceptually it can be simplest to pull the relevant 83 | columns and dates early in the code, and manage them from there. 
We can build 84 | subqueries to perform queries like this as follows: 85 | 86 | ```sql 87 | with dns_rtts as 88 | ( 89 | select prb_id, dst_addr, response_time 90 | from `ripencc-atlas`.samples.dns 91 | where date(start_time) = "2020-10-01" 92 | ), 93 | 94 | ping_rtts as 95 | ( 96 | select prb_id, dst_addr, rtt 97 | from `ripencc-atlas`.samples.ping, unnest(pings) 98 | where date(start_time) = "2020-10-01" 99 | ) 100 | 101 | select * 102 | from ping_rtts 103 | join dns_rtts 104 | using(prb_id, dst_addr) 105 | ``` 106 | 107 | 108 | -------------------------------------------------------------------------------- /docs/measurements_sslcert.md: -------------------------------------------------------------------------------- 1 | ## Notes on the SSL Cert dataset 2 | 3 | This is the sslcert schema in our BigQuery tables. For more information on the raw data this is based upon, please also look at the [RIPE Atlas schema description](https://atlas.ripe.net/docs/data_struct/#v5000). 4 | 5 | ### Schema 6 | 7 | ``` 8 | +-------------------+---------------+--------------------------------------------------------------+ 9 | | field_path | data_type | description | 10 | +-------------------+---------------+--------------------------------------------------------------+ 11 | | af | INT64 | Address Family | 12 | | method | STRING | 'SSL' or 'TLS' | 13 | | prb_id | INT64 | RIPE Atlas probe id | 14 | | last_time_synced | INT64 | Discrepancy (seconds) between probe's clock and the RIPE | 15 | | | | Atlas controller's clock. The value -1 means unknown. 
| 16 | | msm_id | INT64 | RIPE Atlas measurement id | 17 | | group_id | INT64 | RIPE Atlas measurement-group id | 18 | | src_addr | STRING | IP address of the interface used by RIPE Atlas probe | 19 | | src_addr_bytes | BYTES | Value of the src_addr field as a bytes representation | 20 | | dst_addr | STRING | IP address of the destination | 21 | | dst_addr_bytes | BYTES | Value of the dst_addr field as a bytes representation | 22 | | dst_name | STRING | Hostname of the target as used by the probe | 23 | | synth_addr | STRING | IP address of the probe as synthesized by the RIPE Atlas | 24 | | | | controller | 25 | | synth_addr_bytes | BYTES | Value of the synth_addr field as a bytes representation | 26 | | start_time | TIMESTAMP | Start time of the measurement | 27 | | start_hour | INT64 | time hour of the day | 28 | | roundtrip_time | FLOAT64 | Duration from start connecting to receiving the certificate | 29 | | | | (in milliseconds) | 30 | | server_cipher | STRING | Cipher selected by the target (stringly hexadecimal) | 31 | | time_to_connect | FLOAT64 | Duration from time to start to time to connect (over TCP) to | 32 | | | | the target (in milliseconds) | 33 | | time_to_resolve | FLOAT64 | Duration for the DNS resolution process | 34 | | certificate | ARRAY | Certificate | 35 | | alert_level | INT64 | Alert Level | 36 | | alert_description | INT64 | Alert Description | 37 | +-------------------+---------------+--------------------------------------------------------------+ 38 | 39 | ``` 40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RIPE Atlas data in Google BigQuery 2 | 3 | ## Background 4 | 5 | [Google BigQuery](https://cloud.google.com/bigquery/) is a data warehousing 6 | platform with an SQL interface on top to allow fast query-based access to data. 
7 | 8 | The [RIPE Atlas network measurement platform](https://atlas.ripe.net/) conducts 9 | network measurements, and makes the results of those measurements available via 10 | [the RIPE Atlas API](https://atlas.ripe.net/docs/api/v2/reference/) and [bulk 11 | downloads](https://data-store.ripe.net/datasets/atlas-daily-dumps/). 12 | 13 | The API provides some opportunities to filter data, but cannot offer 14 | significant compute cycles for calculation. To provide more scope for computation 15 | and analysis of this data, we are now storing RIPE Atlas data in Google BigQuery. 16 | 17 | For background information on the service, please refer to 18 | https://labs.ripe.net/tools/. 19 | 20 | In particular, note that your usage of this data falls under the [RIPE Atlas 21 | Terms and Conditions](https://atlas.ripe.net/legal/terms-conditions/). 22 | 23 | 24 | ## Datasets and data 25 | 26 | In order to get started, you need a Google account, and you need a project to 27 | run queries under. More information here: 28 | 29 | * [Getting started](docs/gettingstarted.md) 30 | 31 | If you just want to jump in: the public datasets are viewable from our public project: 32 | 33 | https://console.cloud.google.com/bigquery?project=ripencc-atlas 34 | 35 | Initially, we will offer two datasets: **samples**, and **measurements**. 36 | 37 | ### Samples 38 | 39 | The **samples** dataset contains six tables with a static, 1% sample of recent measurement results. 
The tables are: 40 | 41 | * ripencc-atlas.samples.dns ([schema](docs/measurements_dns.md)) 42 | * ripencc-atlas.samples.http ([schema](docs/measurements_http.md)) 43 | * ripencc-atlas.samples.ntp ([schema](docs/measurements_ntp.md)) 44 | * ripencc-atlas.samples.ping ([schema](docs/measurements_ping.md)) 45 | * ripencc-atlas.samples.sslcert ([schema](docs/measurements_sslcert.md)) 46 | * ripencc-atlas.samples.traceroute ([schema](docs/measurements_traceroute.md)) 47 | 48 | These are intended for you to test the service on trivial data volumes, to better understand what's in there quickly. 49 | 50 | ### Measurements 51 | 52 | The **measurements** dataset contains six public views that are continuously updated 53 | with public RIPE Atlas measurement results. Schemas are identical to the samples tables. 54 | 55 | The six views are: 56 | 57 | * ripencc-atlas.measurements.dns 58 | * ripencc-atlas.measurements.http 59 | * ripencc-atlas.measurements.ntp 60 | * ripencc-atlas.measurements.ping 61 | * ripencc-atlas.measurements.sslcert 62 | * ripencc-atlas.measurements.traceroute 63 | 64 | These tables contain measurement results starting from 1 January 2020. 65 | 66 | 67 | ## Tutorials 68 | 69 | These documents are intended to help bootstrap folks into querying this data, but 70 | they're definitely not everything you can do. 71 | 72 | 73 | * [Determine minimum RTT from any probe to any measured RTT](docs/tutorial_min_pings.md) 74 | * [Subclauses and iterating on query building](docs/tutorial_subclauses.md) 75 | * [Cost efficiency](docs/tutorial_cost_efficiency.md): the important options to minimise or estimate query costs 76 | 77 | 78 | ## Feedback 79 | 80 | We're hoping, after some time using this data, to learn more about how best to 81 | structure it, what tables would be useful for us to generate on your behalf, 82 | and how you're using it all. 
83 | 84 | If you have comments, questions, suggestions, or problems to report, please 85 | email atlas-bq@ripe.net 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /docs/measurements_ping.md: -------------------------------------------------------------------------------- 1 | ## Notes on the Ping dataset 2 | 3 | This is the ping schema in our BigQuery tables. For more information on the raw data this is based upon, please also look at the [RIPE Atlas schema description](https://atlas.ripe.net/docs/data_struct/#v5000). 4 | 5 | ### Schema 6 | 7 | ``` 8 | +------------------------------+-----------------+-------------------------------------------------------------+ 9 | | field_path | data_type | description | 10 | +------------------------------+-----------------+-------------------------------------------------------------+ 11 | | af | INT64 | Address Family | 12 | | prb_id | INT64 | RIPE Atlas probe ID | 13 | | last_time_synced | INT64 | Discrepancy (seconds) between probe's clock and the RIPE | 14 | | | | Atlas controller's clock. The value -1 means unknown. 
| 15 | | msm_id | INT64 | RIPE Atlas measurement id | 16 | | group_id | INT64 | RIPE Atlas measurement-group id | 17 | | src_addr | STRING | IP Address of the interface used by RIPE Atlas probe | 18 | | src_addr_bytes | BYTES | value of the src_addr field as a bytes representation | 19 | | dst_addr | STRING | IP address of the destination of the traceroute | 20 | | dst_addr_bytes | BYTES | value of the dst_addr fiels as a bytes representation | 21 | | synth_addr | STRING | IP address of the probe as synthesized by the RIPE Atlas | 22 | | | | controller | 23 | | synth_addr_bytes | BYTES | value of the synth_addr field as a bytes representation | 24 | | start_time | TIMESTAMP | start time of the measurement | 25 | | start_hour | INT64 | time hour of the day | 26 | | size | INT64 | size of the payload send in the ping | 27 | | packets_sent | INT64 | Number of ICMP packets sent to destination | 28 | | packets_received | INT64 | Number of ICMP packets received from destination | 29 | | pings | ARRAY> | | 40 | +------------------------------+---------------+---------------------------------------------------------------+ 41 | ``` 42 | -------------------------------------------------------------------------------- /docs/measurements_http.md: -------------------------------------------------------------------------------- 1 | ## Notes on the HTTP dataset 2 | 3 | This is the HTTP schema in our BigQuery tables. For more information on the raw data this is based upon, please also look at the [RIPE Atlas schema description](https://atlas.ripe.net/docs/data_struct/#v5000). 
4 | 5 | ### Schema 6 | 7 | ``` 8 | +--------------------+-----------+---------------------------------------------------------------- + 9 | | field_path | data_type | description | 10 | +--------------------+-----------+-----------------------------------------------------------------+ 11 | | af | INT64 | Address Family | 12 | | prb_id | INT64 | RIPE Atlas probe ID | 13 | | last_time_synced | INT64 | Discrepancy (seconds) between probe's clock and the RIPE Atlas | 14 | | | | controller's clock. The value -1 means unknown. | 15 | | msm_id | INT64 | RIPE Atlas measurement id | 16 | | group_id | INT64 | RIPE Atlas measurement-group id | 17 | | src_addr | STRING | IP Address of the interface used by RIPE Atlas probe | 18 | | src_addr_bytes | BYTES | value of the src_addr field as a bytes representation | 19 | | dst_addr | STRING | IP address of the destination of the traceroute | 20 | | dst_addr_bytes | BYTES | value of the dst_addr fiels as a bytes representation | 21 | | synth_addr | STRING | IP address of the probe as synthesized by the RIPE Atlas | 22 | | | | controller | 23 | | synth_addr_bytes | BYTES | value of the synth_addr field as a bytes representation | 24 | | start_time | TIMESTAMP | start time of the measurement | 25 | | start_hour | INT64 | time hour of the day | 26 | | id | INT64 | serial number of the request/reponse cycle | 27 | | proto_version | STRING | Protocol used for traceroute: TCP, UDP or ICMP | 28 | | start_time_offset | FLOAT64 | Duration between start of connect and data received (in | 29 | | | | milliseconds) | 30 | | body_size | INT64 | size of the received body (in octets) | 31 | | header_size | INT64 | size of the received header (in octets) | 32 | | dns_error | STRING | error description in case the DNS resolution failed | 33 | | error | STRING | the error description in case an occured error other than DNS | 34 | | | | related | 35 | | http_url | STRING | requested URL (of the target) | 36 | | http_method | STRING | requested HTTP 
verb ('GET', 'HEAD', 'POST') | 37 | | http_status | INT64 | HTTP response state | 38 | | http_duration | FLOAT64 | Duration of the complete HTTP request/response cycle (so | 39 | | | | excluding DNS request) (in millisconds) | 40 | | time_to_connect | FLOAT64 | HTTP response state | 41 | | time_to_first_byte | FLOAT64 | Duration of the time between connecting to and receiving bytes | 42 | | | | from the target (in milliseconds) | 43 | | time_to_resolve | FLOAT64 | Duration of the DNS resolution process (in milliseconds) | 44 | +--------------------+-----------+-----------------------------------------------------------------+ 45 | ``` 46 | -------------------------------------------------------------------------------- /docs/gettingstarted.md: -------------------------------------------------------------------------------- 1 | # Access 2 | 3 | To gain access to this data via BigQuery, you'll need a Google account. 4 | 5 | Once you have a Google account, you should be able to view the following page: 6 | 7 | https://console.cloud.google.com/bigquery?project=ripencc-atlas 8 | 9 | Once you're in, the console looks something like the following. Note that if 10 | you're viewing the `ripencc-atlas` project by following the link above, then 11 | the project will also appear in the `Resources` sidebar on the left. 12 | 13 | ![Google BigQuery screenshot showing the BigQuery console on a fresh project](images/ripencc-atlas-bq-0.png) 14 | 15 | It may not stay there by default, so pin it by selecting the project in the 16 | sidebar, then tapping `pin project`: 17 | 18 | ![Google BigQuery screenshot showing the BigQuery console pinning a project](images/ripencc-atlas-bq-1.png) 19 | 20 | That'll keep the `ripencc-atlas` project in your sidebar when you navigate the interface. 
21 | 22 | ## BigQuery Terminology: Projects, Datasets, and Tables 23 | 24 | In BigQuery terminology, the following classifications are important: 25 | 26 | * Projects 27 | * Datasets 28 | * Tables and Views 29 | 30 | 1. A project has users, it can contain multiple datasets, and it is the entity that costs are ultimately charged to. 31 | 2. A dataset can hold multiple tables. 32 | 3. A table is what stores the actual data. A view can often be treated like a table; it's backed by a real table, but relies on a query to calculate its contents. 33 | 34 | You're going to need a project under which to run queries. Enter the `Select a project` menu at the top, and you'll see this: 35 | 36 | ![Google BigQuery screenshot showing the project selection dialog](images/ripencc-atlas-bq-2.png) 37 | 38 | Go ahead and select `New project`, and give your project a name: 39 | 40 | ![Google BigQuery screenshot showing the project creation page](images/ripencc-atlas-bq-3.png) 41 | 42 | Once this is done, you'll be thrown to the main console for your project. From 43 | here you can manage everything: users, permissions, billing, and any other 44 | Google Cloud Platform service you start using. For now, go to the search bar 45 | and find BigQuery to get back to the query console. 46 | 47 | ![Google Cloud Platform main console screenshot](images/ripencc-atlas-bq-4.png) 48 | 49 | Having done all of that, you now have: 50 | 51 | * Pinned our public project, and 52 | * Created your own project to run queries under. 53 | 54 | Note that unless you've clicked around further, you've not yet added any 55 | billing information. You'll max out after 1TB of queries on the free tier. But 56 | that's enough to try things out: 57 | 58 | ![Google BigQuery console showing the result of a simple query](images/ripencc-atlas-bq-5.png) 59 | 60 | 61 | ## Datasets 62 | 63 | Our public project is `ripencc-atlas`, and the datasets are `measurements` 64 | (with six views) and `samples` (with six tables). 
From a usage standpoint, the 65 | difference between views and tables is barely relevant, but views are computed 66 | from other tables. These views give us control on what the tables look like 67 | publicly. 68 | 69 | Each of the views in `measurements` holds some historical data, and is populated with 70 | streaming data soon after it is delivered by the probes. The tables in `samples` 71 | are populated with 1% of one week of data for each measurement type. 72 | 73 | ## Costs 74 | 75 | We're managing the storage of this data, and the trade-off is that we can't 76 | absorb the costs of all users running querie, just as we can't absorb the 77 | cost of running a custom platform to do the same thing. Google will charge 78 | you to query this data. 79 | 80 | The cost of querying data is directly related to how much data must be 81 | retrieved from storage. The amount of computation performed over that data 82 | once it's retrieved is not relevant to pricing. Our measurements tables are 83 | partitioned by the field 84 | 85 | * start_time 86 | 87 | at daily granularities. You can always constrain costs by reducing the time 88 | window that you are querying with this column. You can read more on 89 | optimising costs on [this page](tutorial_cost_efficiency.md). 90 | 91 | Note that there's a baseline free-level: it's possible to query up to 1TB free 92 | per month, but also note that it's very easy to query 1TB of data! For 93 | testing and prototyping, please consider using the `samples` dataset we 94 | make available alongside `measurements`. 95 | 96 | In addition to the free tier, new users get US$300 to spend on the 97 | platform, and that'll get you a little further: closer to 50-60TB of queries 98 | depending on current pricing. 
99 | 100 | Users at educational institutions ought to investigate the Google For Education 101 | program: https://edu.google.com/programs/ 102 | 103 | You can read more about query pricing here: https://cloud.google.com/bigquery/pricing#queries 104 | 105 | 106 | -------------------------------------------------------------------------------- /docs/basics.md: -------------------------------------------------------------------------------- 1 | # Basics 2 | 3 | ## Concepts 4 | 5 | * Projects 6 | * Datasets 7 | * Tables 8 | 9 | * Queries 10 | 11 | * Access 12 | 13 | 14 | ## Interfaces 15 | 16 | * The web console 17 | * The command line utilities 18 | * Language-specific hooks 19 | 20 | `bq query --project_id prod-atlas-project --nouse_legacy_sql ' select column_name, is_nullable, data_type from atlas_measurements.INFORMATION_SCHEMA.COLUMNS where table_name = "traceroute"` 21 | 22 | 23 | ## Efficiency 24 | 25 | The cost for a query is related to the volume of data that will be retroeved 26 | from storage, and so it can be useful to think of broad queries that can be 27 | summarised later, rather than queries that summarise too early. 28 | 29 | ### Partitions 30 | 31 | Our main tables are partitioned by day along the `start_time` column. The 32 | granularity on these partitioned is per day, so requesting measurements across 33 | one minute will cost the same to retrieve as measurements for that full day. 34 | 35 | Our tables also require a filter against `start_time`, a basic protection to 36 | stop you from accidentally querying many years of data. 37 | 38 | ### Columnar scoping 39 | 40 | > bq query --project_id=prod-atlas-project \ 41 | > --use_legacy_sql=false \ 42 | > --dry_run \ 43 | > 'select * from atlas_measurements.ping where timestamp_trunc(start_time, day) = "2020-08-01" ' 44 | > Query successfully validated. Assuming the tables are not modified, running 45 | > this query will process upper bound of 103143443924 bytes of data. 
46 | 47 | The web console indicates the same estimations live, as you edit a query. 48 | 49 | But perhaps you don't need to select everything; perhaps you care about, for 50 | example, probes and the targets they're running ping measurements to. You 51 | wouldn't need to select superfluous columns such as the start_time, or the RTT 52 | measurements themselves, or any other metadata. You could instead start from a 53 | more restricted query: 54 | 55 | ```shell 56 | $ bq query --project_id=prod-atlas-project \ 57 | --use_legacy_sql=false \ 58 | --dry_run \ 59 | 'select prb_id, dst_addr from atlas_measurements.ping where timestamp_trunc(start_time, day) = "2020-08-01" ' 60 | Query successfully validated. Assuming the tables are not modified, running 61 | this query will process upper bound of 17536398896 bytes of data. 62 | ``` 63 | 64 | That's under 20% of the first query; the costs are not significant on small 65 | data volumes like this, but you'll notice it during development or when running 66 | recurring queries. 67 | 68 | In this case, the query is simple and the upper estimate was easy to calculate. 
The system indicates that's precisely the amount of data that was fetched: 69 | 70 | ```bash 71 | $ bq show -j prod-atlas-project:EU.bquxjob_4328da65_1753068f44d 72 | Job prod-atlas-project:bquxjob_4328da65_1753068f44d 73 | 74 | Job Type State Start Time Duration User Email Bytes Processed Bytes Billed Billing Tier Labels 75 | ---------- --------- ----------------- ---------------- ------------------- ----------------- -------------- -------------- -------- 76 | query SUCCESS 16 Oct 10:25:10 0:00:15.877000 sstrowes@ripe.net 17536398896 17537433600 1 77 | ``` 78 | 79 | ### Maximising the useful output while minimising data 80 | 81 | As above, it's generally useful to consider the dimensions along which you want 82 | to cut your data: if you care about (as above) probe IDs, targeted IP 83 | addresses, and a particular time slice, then those are the dimensions that you 84 | can choose to pull from storage to satisfy your query. These are all 85 | specifically retrievable because of the structure of the data: the columns in 86 | the schema and the daily partitioning on start\_time. 87 | 88 | If you care about a specific subset within that data, BigQuery must still 89 | retrieve that data for you. That means that even if you throw 90% of the above 90 | data away, you'll still be charged for the bytes retrieved. 91 | 92 | A useful way to reshape how you think about problems in BigQuery can be to 93 | perform the same computation across all of the data, and filter later when you 94 | need to. This can apply in specific situations, and it can be useful to modify 95 | your thinking here: rather than being overly specific about targets or 96 | measurement IDs, compute over all of them instead, when the cost is going to be 97 | the same. 98 | 99 | Here's a three-stage toy example. 
100 | 101 | ```sql 102 | SELECT prbId, min(rtt) as min_rtt FROM prod.traceroute_atlas_prod, UNNEST(hops) h, UNNEST(resultHops) rh WHERE startTime >= "2019-11-01T00:00:00" AND startTime < "2019-11-02T00:00:00" AND rh.from = "8.8.8.8" GROUP BY prbId``` 103 | 104 | 105 | ### Outro 106 | 107 | In short: you can reduce query costs ahead-of-time by knowing what data you 108 | want to retrieve. Don't pull in more days than you need, and don't pull in more 109 | columns that you need. 110 | 111 | Aim to be efficient in your queries, and you can get more 112 | out of the system. 113 | 114 | -------------------------------------------------------------------------------- /docs/measurements_ntp.md: -------------------------------------------------------------------------------- 1 | ## Notes on the NTP dataset 2 | 3 | This is the NTP schema in our BigQuery tables. For more information on the raw data this is based upon, please also look at the [RIPE Atlas schema description](https://atlas.ripe.net/docs/data_struct/#v5000). 4 | 5 | ### Schema 6 | 7 | ``` 8 | +-----------------------------+-----------------+-----------------------------------------------------------------+ 9 | | field_path | data_type | description | 10 | +-----------------------------+-----------------+-----------------------------------------------------------------+ 11 | | af | INT64 | Address Family | 12 | | ntp_version | INT64 | | 13 | | protoc | STRING | Transport protocol. Always UDP | 14 | | prb_id | INT64 | RIPE Atlas probe ID | 15 | | last_time_synced | INT64 | Discrepancy (seconds) between probe's clock and the RIPE Atlas | 16 | | | | controller's clock. The value -1 means unknown. 
| 17 | | msm_id | INT64 | RIPE Atlas measurement id | 18 | | group_id | INT64 | RIPE Atlas measurement-group id | 19 | | src_addr | STRING | IP Address of the interface used by RIPE Atlas probe | 20 | | src_addr_bytes | BYTES | value of the src_addr field as a bytes representation | 21 | | dst_addr | STRING | IP address of the destination of the traceroute | 22 | | dst_addr_bytes | BYTES | value of the dst_addr fiels as a bytes representation | 23 | | synth_addr | STRING | IP address of the probe as synthesized by the RIPE Atlas | 24 | | | | controller | 25 | | synth_addr_bytes | BYTES | value of the synth_addr field as a bytes representation | 26 | | start_time | TIMESTAMP | start time of the measurement | 27 | | start_hour | INT64 | time hour of the day | 28 | | mode | STRING | server | 29 | | server_stratum | INT64 | Number of NTP-hops away server is from reference clock, as | 30 | | | | indicated by NTP server (stratum -1: invalid; stratum 0: | 31 | | | | reference clocks; stratum 1: attached to stratum 0; stratum 2: | 32 | | | | queries stratum 1, etc) | 33 | | server_root_dispersion | FLOAT64 | error between NTP server and reference clock (indicated by NTP | 34 | | | | server) | 35 | | server_root_delay | FLOAT64 | Total round-trip delay to the reference clock (indicated by NTP | 36 | | | | server) | 37 | | server_reference_time | FLOAT64 | server's reference timestamp (in NTP time) | 38 | | server_precision | FLOAT64 | Precision of the server's clock in seconds (running time taken | 39 | | | | to read the clock), as indicated by the NTP server | 40 | | server_reference_clock | STRING | String indicating the source of the server's time signal | 41 | | leap_indicator | INT64 | warning of an impending leap second. '-1' indicates the removal | 42 | | | | of a second (i.e. 59 seconds), '1' indicates the addition of a | 43 | | | | second (61 seconds), 0 indicates no leap second. Null when | 44 | | | | unknown. 
| 45 | | results | ARRAY> | | 56 | +-----------------------------+-----------------+-----------------------------------------------------------------+ 57 | 58 | ``` 59 | -------------------------------------------------------------------------------- /docs/measurements_traceroute.md: -------------------------------------------------------------------------------- 1 | ## Notes on the Traceroute dataset 2 | 3 | This is the traceroute schema in our BigQuery tables. For more information on the raw data this is based upon, please also look at the [RIPE Atlas schema description](https://atlas.ripe.net/docs/data_struct/#v5000). 4 | 5 | ### Schema 6 | 7 | ``` 8 | +-----------------------+------------------+--------------------------------------------------------------+ 9 | | field_path | data_type | description | 10 | +-----------------------+------------------+--------------------------------------------------------------+ 11 | | af | INT64 | Adress Family | 12 | | protoc | STRING | Protocol used for traceroute: TCP, UDP or ICMP | 13 | | prb_id | INT64 | RIPE Atlas probe ID | 14 | | last_time_synced | INT64 | Discrepancy (seconds) between probe's clock and the RIPE | 15 | | | | Atlas controller's clock. The value -1 means unknown. 
Let's get started just by accessing the `ping` table and build up a small query.
It may be worth 8 | opening the [ping schema](measurements_ping.md) to glance at. 9 | 10 | Let's just familiarise ourselves with the language by grabbing a column, the target IP addresses: 11 | 12 | ```sql 13 | select dst_addr 14 | from `ripencc-atlas`.samples.ping 15 | ``` 16 | 17 | This is about as simple and as lightweight as we can go. This is going to do no 18 | aggregation and no calculation over any of the data, it'll simply return any 19 | `dst_addr`, i.e. the target IP address for the measurement, that exists in the 20 | data. 21 | 22 | That's not especially useful, but for the sake of interest, let's aggregate a little bit. Let's say we want to know which IP addresses are the "heavy-hitters" in the dataset: 23 | 24 | ```sql 25 | select dst_addr, count(*) c 26 | from `ripencc-atlas`.samples.ping 27 | group by dst_addr 28 | order by c desc 29 | ``` 30 | 31 | This is already doing much more! Some things to note: 32 | 33 | * `count()` is an aggregate function, and the language requires that you make your intent for the other columns clear. The `group by` clause indicates that the `count()` should be grouped by `dst_addr`. 34 | * `count(*) c` is going to name the resulting column `c`, rather than give it a temporary name 35 | * `order by c desc` is going to order the results in descending order, so we'll see immediately who the top hits are. 36 | 37 | This gets us one step closer. We're interested in the RTTs to each destination by probe, so let's think about selecting by probe: 38 | 39 | ```sql 40 | select prb_id, dst_addr, count(*) c 41 | from `ripencc-atlas`.samples.ping 42 | group by prb_id, dst_addr 43 | order by c desc 44 | ``` 45 | 46 | We can simply add the `prb_id` column, and make sure we group by it. The above 47 | will still order by the top-hitters, but the groups are smaller. We could 48 | equally `order by prb_id, c desc` if we wanted to view the results by `prb_id` 49 | first. 
50 | 51 | Now let's try to get to the meat of the results: the actual measured RTTs. 52 | Notice that they're embedded inside an array of structs; arrays require a 53 | little bit of extra work. We simply cannot do the following: 54 | 55 | ```sql 56 | select prb_id, dst_addr, pings.rtt 57 | from `ripencc-atlas`.samples.ping 58 | ``` 59 | 60 | The language simply won't allow that; arrays must be `unnest()`ed first. Let's say we have a small table with the following columns: 61 | 62 | ``` 63 | prb_id | dst_addr | pings[rtt] 64 | ----------+-------------------+--------------- 65 | 14277 | 213.133.109.134 | 21.79902 66 | | | 21.81758 67 | | | 24.965825 68 | ``` 69 | 70 | In the real table, the array has multiple fields, but the principle is the same: to get at the `rtt` values, we must `unnest()`. An `unnest()` will unbundle each of the entries in the array and map it back to the parent row, creating a table that looks like this: 71 | 72 | ``` 73 | prb_id | dst_addr | rtt 74 | ----------+-------------------+--------------- 75 | 14277 | 213.133.109.134 | 21.79902 76 | 14277 | 213.133.109.134 | 21.81758 77 | 14277 | 213.133.109.134 | 24.965825 78 | ``` 79 | 80 | From there, we can easily process the RTT values. 81 | 82 | Applying this to the real table can work as follows: 83 | 84 | 85 | ```sql 86 | select prb_id, dst_addr, rtt 87 | from `ripencc-atlas`.samples.ping, unnest(pings) 88 | ``` 89 | 90 | This is going to unbundle all those RTT values and give us a table with the 91 | three columns we care about. 92 | 93 | Note that if we want all columns from the struct embedded in the `pings` array, 94 | you can name the unnested result as (for example) `unnest(pings) p`, then 95 | `select ... p.*`. 
In particular, `timestamp_trunc()`, which can mask timestamps to certain granularities, including `week`, `day`, `hour`, etc.
Its use can look like this:
166 | 167 | -------------------------------------------------------------------------------- /docs/tutorial_cost_efficiency.md: -------------------------------------------------------------------------------- 1 | # Notes on Cost Efficiency 2 | 3 | The cost for a query is related to the volume of data that will be retrieved 4 | from storage. The number of compute cycles spent on that data is not relevant, 5 | so in the most abstract sense it's most cost-efficient to retrieve as little 6 | data as possible, and extract as much as possible from it. 7 | 8 | This page aims to outline how to achieve that. 9 | 10 | ## Development: iteration 11 | 12 | When you're developing a query, it generally makes little sense to repeatedly 13 | query a large dataset. Undoubtedly a full dataset may contain more edge cases 14 | that your code will have to handle, but in many cases it can make sense to 15 | minimise the data you're sketching your queries out against. 16 | 17 | The `samples` dataset is an example of how to do this. Those tables were 18 | generated with queries similar to this query: 19 | 20 | ```sql 21 | create table your_dataset.your_table 22 | as select * 23 | from `ripencc-atlas`.measurements.traceroute 24 | where date(start_time) = "2020-10-01" 25 | and RAND() <= 1/100 26 | ``` 27 | 28 | This will 29 | [create](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) 30 | a table from the traceroute table, selecting only one day of data, and 31 | selecting when `RAND()` returns a value under 0.01. That is: retain 32 | approximately 1% of all rows. With a small table, you can iterate repeatedly 33 | while you develop and debug. 
The data in the `measurements` dataset is partitioned by day on the `start_time` column. The granularity of these partitions is per day, so one day will cost approximately half as much as two days, and so forth. This means it can be extremely important to constrain your query to the time range you care about.
72 | 73 | Depending on the nature of your work, you may consider whether you need one 74 | specific answer, or if writing the output of a more general query into a much 75 | smaller table is a better approach. 76 | 77 | Here's a toy example. Let's say we want to know the minimum RTT to 78 | 8.8.8.8 from all probes. That can be accomplished with the following query: 79 | 80 | ```sql 81 | select prb_id, min(rtt) as min_rtt 82 | from `ripencc-atlas`.measurements.traceroute, unnest(hops) h 83 | where date(start_time) = "2020-10-01" 84 | and af = 4 85 | and h.hop_addr = "8.8.8.8" 86 | group by prb_id 87 | ``` 88 | 89 | This query definitely does the job, but in order to do it, it has to throw away 90 | all the other results. 91 | 92 | For the same cost, it can be reformulated as: 93 | 94 | ```sql 95 | select prb_id, hop_addr, min(rtt) as min_rtt 96 | from `ripencc-atlas`.measurements.traceroute, unnest(hops) h 97 | where date(start_time) = "2020-10-01" 98 | group by prb_id, hop_addr 99 | ``` 100 | 101 | This query will retain a minimum RTT to _any_ IP that responded to any probe, 102 | and you can store this (much smaller) result in a table if you think you'll 103 | consult it in the future. 104 | 105 | 106 | 107 | ## In summary? 108 | 109 | 110 | As above, it's generally useful to consider the dimensions along which you want 111 | to cut your data: if you care about (as above) probe IDs, targeted IP 112 | addresses, and a particular time slice, then those are the dimensions that you 113 | can choose to pull from storage to satisfy your query. 114 | 115 | Consider how you develop your queries: prototype against small datasets if you can. 
Then consider the dimensions you care about:
In short: you can reduce query costs ahead-of-time by knowing what data you want to retrieve. Don't pull in more days than you need, and don't pull in more columns than you need.
| 16 | | msm_id | INT64 | RIPE Atlas measurement id | 17 | | group_id | INT64 | RIPE Atlas measurement-group id | 18 | | src_addr | STRING | IP Address of the interface used by RIPE Atlas probe | 19 | | src_addr_bytes | BYTES | value of the src_addr field as a bytes representation | 20 | | dst_addr | STRING | IP address of the DNS server as used by the probe | 21 | | dst_addr_bytes | BYTES | value of the dst_addr fiels as a bytes representation | 22 | | synth_addr | STRING | IP address of the probe as synthesized by the RIPE Atlas controller | 23 | | synth_addr_bytes | BYTES | value of the synth_addr field as a bytes representation | 24 | | dst_name | STRING | Hostname of the DNS server as used by the probe | 25 | | start_time | TIMESTAMP | start time of the measurement | 26 | | start_hour | INT64 | time hour of the day | 27 | | query | STRING | Query payload send to the DNS server. UU encoded | 28 | | retry_count | INT64 | Number of retries of the DNS request | 29 | | wire_message | STRING | The RFC1035 4.1 DNS message as received from the server. UU encoded. | 30 | | header_ANCOUNT | INT64 | ANCOUNT field from the `Header` section of the answer RFC1035 4.1.1 | 31 | | header_ARCOUNT | INT64 | | 32 | | header_ID | INT64 | | 33 | | header_QDCOUNT | INT64 | | 34 | | response_time | FLOAT64 | | 35 | | size | INT64 | | 36 | | error | STRING | output of the GETADDRINFO(3) system call on the probe | 37 | | answers | ARRAY> | | 46 | +--------------------+------------------------+-----------------------------------------------------------------------+ 47 | 48 | ``` 49 | 50 | ### Queries 51 | 52 | Notice that the DNS results contain the same `qbuf` and `abuf` fields that the RIPE Atlas API returns. 
53 | 54 | 55 | 56 | `bq query --udf_resource= ` 57 | 58 | More on UDFs: 59 | https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions 60 | 61 | Running from the command line is possible as follows: 62 | ``` 63 | bq query --project_id= --use_legacy_sql=false --udf_resource=gs://ripe-atlas-bigquery/scripts/dns.sql 'select dst_addr, query, parse_dns_buffer(TO_CODE_POINTS(FROM_BASE64(query))) query_parsed from `ripencc-atlas`.samples.dns where date(start_time) = "2020-10-21" and query is not null' 64 | ``` 65 | 66 | ```sql 67 | CREATE TEMP FUNCTION parse_dns_buffer(buffer ARRAY) 68 | RETURNS STRUCT 69 | LANGUAGE js 70 | OPTIONS ( 71 | library=["gs://ripe-atlas-bigquery/scripts/dns.js"] 72 | ) 73 | AS """ 74 | return parse_qbuf(buffer); 75 | """; 76 | 77 | select dst_addr, query, parse_dns_buffer(TO_CODE_POINTS(FROM_BASE64(query))) query_parsed 78 | from samples.dns 79 | where date(start_time) = "2020-10-21" 80 | and query is not null 81 | ``` 82 | 83 | ## Parsing the abuf or the qbuf 84 | 85 | DNS results have the issue that the DNS responses are binary and stored encoded 86 | in base64. So in order to get at the response to a DNS query, some parsing is 87 | necessary. 88 | 89 | Google BigQuery allows for user-defined functions that can then effectively be 90 | parallelised and run on individual results. The language of choice is 91 | JavaScript, but it's still possible to use this fairly easily. 92 | 93 | Below is an example that uses some of our code to parse out most parts of DNS 94 | responses: this is operating in the general case, because the data payload for 95 | individual resource types is defined per type, so the script below decodes 96 | everything but bundles the actual data into String containing the byte values 97 | separated by commas. 98 | 99 | In the less general case -- say you wanted to parse only AAAA queries -- it'd 100 | be possible instead to focus on those and parse appropriately. 
101 | 102 | The Javascript code referred to in the code snippet here should be regarded as 103 | a template or a prototype: it's a great starting point, but it's not been 104 | heavily road-tested. Feedback or suggestions for that code are most welcome! 105 | 106 | ### abuf 107 | 108 | ```sql 109 | CREATE TEMP FUNCTION parse_dns_buffer(buffer ARRAY) 110 | RETURNS STRUCT< 111 | id INT64, 112 | flag_query BOOL, 113 | flag_opcode STRING, 114 | flag_auth BOOL, 115 | flag_trunc BOOL, 116 | flag_recurse_desired BOOL, 117 | flag_recurse_avail BOOL, 118 | flag_rcode STRING, 119 | qdcount INT64, 120 | ancount INT64, 121 | aucount INT64, 122 | adcount INT64, 123 | payload ARRAY> 131 | > 132 | LANGUAGE js 133 | OPTIONS ( 134 | library=["gs://ripe-atlas-bigquery/scripts/dns.js"] 135 | ) 136 | AS 137 | """ 138 | return parse_wire_message(buffer); 139 | """; 140 | 141 | select * except(wire_message), parse_dns_buffer(TO_CODE_POINTS(FROM_BASE64(wire_message))) answer_parsed 142 | from `ripencc-atlas`.samples.dns 143 | where date(start_time) = "2020-10-21" 144 | limit 10 145 | 146 | ``` 147 | 148 | 149 | ### qbuf 150 | 151 | ```sql 152 | CREATE TEMP FUNCTION parse_dns_buffer(buffer ARRAY) 153 | RETURNS STRUCT< 154 | id INT64, 155 | flag_query BOOL, 156 | flag_opcode STRING, 157 | flag_auth BOOL, 158 | flag_trunc BOOL, 159 | flag_recurse_desired BOOL, 160 | flag_recurse_avail BOOL, 161 | flag_rcode STRING, 162 | qdcount INT64, 163 | payload ARRAY> 171 | > 172 | LANGUAGE js 173 | OPTIONS ( 174 | library=["gs://ripe-atlas-bigquery/scripts/dns.js"] 175 | ) 176 | AS 177 | """ 178 | return parse_query(buffer); 179 | """; 180 | 181 | select * except(query), parse_dns_buffer(TO_CODE_POINTS(FROM_BASE64(query))) query_parsed 182 | from `ripencc-atlas`.samples.dns 183 | where query is not null 184 | limit 10 185 | ``` 186 | 187 | -------------------------------------------------------------------------------- /scripts/dns_parser.js: 
// Maps DNS RR TYPE numbers to their mnemonics.
// Source: IANA "Domain Name System (DNS) Parameters", RR TYPEs registry.
// Unknown codes are passed through numerically by map_rrtype().
const rrtype_table = {
    1: "A",
    2: "NS",
    3: "MD",
    4: "MF",
    5: "CNAME",
    6: "SOA",
    7: "MB",
    8: "MG",
    9: "MR",
    10: "NULL",
    11: "WKS",
    12: "PTR",
    13: "HINFO",
    14: "MINFO",
    15: "MX",
    16: "TXT",
    17: "RP",
    18: "AFSDB",
    19: "X25",
    20: "ISDN",
    21: "RT",
    22: "NSAP",
    23: "NSAP-PTR",
    24: "SIG",
    25: "KEY",
    26: "PX",
    27: "GPOS",
    28: "AAAA",
    29: "LOC",
    30: "NXT",
    31: "EID",
    32: "NIMLOC",
    33: "SRV",
    34: "ATMA",
    35: "NAPTR",
    36: "KX",
    37: "CERT",
    38: "A6",
    39: "DNAME",
    40: "SINK",
    41: "OPT",
    42: "APL",
    43: "DS",
    44: "SSHFP",
    45: "IPSECKEY",
    46: "RRSIG",
    47: "NSEC",
    48: "DNSKEY",
    49: "DHCID",
    50: "NSEC3",
    51: "NSEC3PARAM",
    52: "TLSA",
    53: "SMIMEA",
    55: "HIP",
    56: "NINFO",
    57: "RKEY",
    58: "TALINK",
    59: "CDS",
    60: "CDNSKEY",
    61: "OPENPGPKEY",
    62: "CSYNC",
    63: "ZONEMD",
    64: "SVCB",
    65: "HTTPS",
    99: "SPF",
    100: "UINFO",
    101: "UID",
    102: "GID",
    103: "UNSPEC",
    104: "NID",
    105: "L32",
    106: "L64",
    107: "LP",
    108: "EUI48",
    109: "EUI64",
    249: "TKEY",
    250: "TSIG",
    251: "IXFR",
    252: "AXFR",
    253: "MAILB",
    254: "MAILA",
    255: "*",
    256: "URI",
    257: "CAA",
    258: "AVC",
    259: "DOA",
    260: "AMTRELAY",
    32768: "TA"
}

// DNS CLASS values (IANA DNS CLASSes registry). Unmapped values (e.g. the
// requestor's UDP payload size carried in an OPT RR's class field) are
// passed through numerically.
const rrclass_table = {
    1: "IN",
    3: "CH",
    4: "HS"
}

// DNS header OPCODE values (3 is unassigned).
const dns_opcodes = {
    0: "Query",
    1: "Inverse Query",
    2: "Status",
    4: "Notify",
    5: "Update",
    6: "DNS Stateful Operations"
}

// DNS RCODE values per the IANA registry.
// NOTE(fix): the original table mapped both 9 and 10 to "NotAuth" and 11 to
// "NotZone" -- an off-by-one duplication.  Per IANA: 10 = NotZone,
// 11 = DSOTYPENI (RFC 8490).
const dns_rcodes = {
    0: "NoError",
    1: "FormErr",
    2: "ServFail",
    3: "NXDomain",
    4: "NotImp",
    5: "Refused",
    6: "YXDomain",
    7: "YXRRSet",
    8: "NXRRSet",
    9: "NotAuth",
    10: "NotZone",
    11: "DSOTYPENI",
    16: "BADVERS/BADSIG",
    17: "BADKEY",
    18: "BADTIME",
    19: "BADMODE",
    20: "BADNAME",
    21: "BADALG",
    22: "BADTRUNC",
    23: "BADCOOKIE"
}

// EDNS(0) option codes we recognise (RFC 5001 for NSID).
const option_type = {
    3: "NSID" // Add other fields
}


// Translate a numeric RR TYPE to its mnemonic; unknown codes pass through.
function map_rrtype(value)
{
    return rrtype_table[value] || value;
}

// Translate an EDNS(0) option code to its mnemonic; unknown codes pass through.
function map_rrtype_edns0(value)
{
    return option_type[value] || value;
}


// Translate a numeric RR CLASS to its mnemonic; unknown codes pass through.
function map_rrclass(value)
{
    return rrclass_table[value] || value;
}

/*
 * Decode a (possibly compressed) domain name from `buffer` starting at
 * offset `i` (RFC 1035, sections 3.1 and 4.1.4).
 *
 * Returns [name, next_offset] on success, where next_offset is the offset
 * of the first byte after the name in the *original* stream (a compression
 * pointer always occupies exactly two bytes there), or -1 on a malformed
 * name (NUL inside a label, or a pointer past the end of the buffer).
 */
function parse_name(buffer, i)
{
    let name = "";
    let position = i;
    let compression = false;

    for ( ; position < buffer.length; ) {

        let length = parseInt(buffer[position++]);

        // Zero-length (root) label terminates the name.
        if (length === 0) {
            if (compression === false) {
                i++;
            }
            break;
        }

        // Header compression; this is referring to a name someplace else.
        // RFC 1035, Section 4.1.4:
        //   The first two bits are ones.  This allows a pointer to be
        //   distinguished from a label, since the label must begin with two
        //   zero bits because labels are restricted to 63 octets or less.
        if (length >= 192) {
            const new_position = ((length - 192) << 8) | parseInt(buffer[position++]);

            // The pointer target must leave at least one readable byte
            // (>= also rejects a pointer to exactly end-of-buffer, which the
            // original accepted and then read `undefined`).
            if (new_position >= buffer.length) {
                return -1;
            }

            position = new_position;

            if (compression === false) {
                compression = true;

                // Two bytes describe the compression: flag byte + offset.
                // Compressed names can themselves refer to compressed names,
                // so only advance `i` for the top level of the main stream.
                i += 2;
            }
            // Length of the first label at the redirected position.
            length = parseInt(buffer[position++]);
        }

        for (let j = 0; j < length; j++) {
            const character = String.fromCharCode( buffer[position] );
            // We've found a null character, so probably the length is wrong.
            // Rather than try to guess, just bail out.
            if (character === '\0') {
                return -1;
            }
            name += character;
            position++;
        }
        name += ".";
        if (compression === false) {
            i = position;
        }
    }

    // An empty name is the DNS root.
    if (name.length === 0) {
        name += ".";
    }

    return [name, i];
}

/*
 * Parse one entry of the question section at offset `i`.
 * Returns [qtype, qclass, qname, next_offset], or -1 on a malformed name.
 */
function parse_question(buffer, i)
{
    const out = parse_name(buffer, i);
    if (out === -1) {
        return -1;
    }
    // (fix) qname was previously assigned without declaration -- an implicit
    // global that throws under strict mode.
    let qname;
    [qname, i] = out;

    const qtype = map_rrtype(parseInt((buffer[i] << 8) | buffer[i+1]));
    i += 2;
    const qclass = map_rrclass(parseInt((buffer[i] << 8) | buffer[i+1]));
    i += 2;

    return [qtype, qclass, qname, i];
}
231 | //Each resource record has the following format: 232 | // 1 1 1 1 1 1 233 | // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 234 | // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 235 | // | | 236 | // / / 237 | // / NAME / 238 | // | | 239 | // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 240 | // | TYPE | 241 | // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 242 | // | CLASS | 243 | // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 244 | // | TTL | 245 | // | | 246 | // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 247 | // | RDLENGTH | 248 | // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--| 249 | // / RDATA / 250 | // / / 251 | // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 252 | 253 | 254 | function parse_record_data(buffer, rtype, rclass, i, l) 255 | { 256 | let data = []; 257 | const end = i+l; 258 | 259 | 260 | 261 | switch (rtype) { 262 | case "A": { 263 | data = buffer.slice(i, i+l).join("."); 264 | 265 | // Mash the IPv4 address into hex, so it should be 8 chars long. 
266 | // It's possible with a truncated buffer to not grab a full IP addr, 267 | // which breaks calls to net.ip_from_string 268 | const tmp_str = buffer.slice(i, i+l).reduce((output, dat) => 269 | (output + ('0' + (dat & 0xff).toString(16)).slice(-2)), 270 | ''); 271 | if (tmp_str.length !== 8) {i 272 | return -1; 273 | } 274 | 275 | break; 276 | } 277 | case "AAAA": { 278 | data = buffer.slice(i, i+l).reduce((output, dat) => 279 | (output + ('0' + (dat & 0xff).toString(16)).slice(-2)), 280 | ''); 281 | if (data.length !== 32) { 282 | // 32 hex digits == 128 bits 283 | // if the length is less, the buffer is incomplete 284 | return -1; 285 | } 286 | 287 | data = data.substring(0,4) + ":" + 288 | data.substring(4,8) + ":" + 289 | data.substring(8,12) + ":" + 290 | data.substring(12,16) + ":" + 291 | data.substring(16,20) + ":" + 292 | data.substring(20,24) + ":" + 293 | data.substring(24,28) + ":" + 294 | data.substring(28,32) 295 | 296 | break; 297 | } 298 | case "CNAME": { 299 | let out = 300 | 301 | parse_name(buffer, i); 302 | if (out === -1) { 303 | return -1; 304 | } 305 | [cname, i] = out; 306 | 307 | data = cname; 308 | 309 | break; 310 | } 311 | case "OPT": { 312 | 313 | data = buffer.slice(i, i+l); 314 | offset = 1; 315 | let name =""; 316 | code = data[offset]; 317 | if (code ===3) //only parsing for nsid 318 | { 319 | offset +=2; 320 | length = data[offset]; 321 | if (length === 0) { 322 | break; 323 | } 324 | offset +=1; 325 | for (let j = 0; j < length; j++) { 326 | const character = String.fromCharCode( data[j+offset] ); 327 | name += character; 328 | } 329 | data=name; 330 | } 331 | break; 332 | 333 | } 334 | case "NS": { 335 | let out = parse_name(buffer, i); 336 | if (out === -1) { 337 | return -1; 338 | } 339 | [dname, i] = out; 340 | 341 | data = dname; 342 | 343 | break; 344 | } 345 | case "SOA": { 346 | let out = parse_name(buffer, i); 347 | if (out === -1) { 348 | return -1; 349 | } 350 | [mname, i] = out; 351 | 352 | out = parse_name(buffer, 
i); 353 | if (out === -1) { 354 | return -1; 355 | } 356 | [rname, i] = out; 357 | 358 | const serial = parseInt((buffer[i] << 24) | (buffer[i+1] << 16) | (buffer[i+2] << 8) | buffer[i+3]); 359 | i += 4; 360 | const refresh = parseInt((buffer[i] << 24) | (buffer[i+1] << 16) | (buffer[i+2] << 8) | buffer[i+3]); 361 | i += 4; 362 | const retry = parseInt((buffer[i] << 24) | (buffer[i+1] << 16) | (buffer[i+2] << 8) | buffer[i+3]); 363 | i += 4; 364 | const expire = parseInt((buffer[i] << 24) | (buffer[i+1] << 16) | (buffer[i+2] << 8) | buffer[i+3]); 365 | i += 4; 366 | const minimum = parseInt((buffer[i] << 24) | (buffer[i+1] << 16) | (buffer[i+2] << 8) | buffer[i+3]); 367 | i += 4; 368 | 369 | data = mname + " " + rname + " " + serial + " " + refresh + " " + retry + " " + expire + " " + minimum; 370 | 371 | break; 372 | } 373 | case "TXT": { 374 | let name = ""; 375 | 376 | for ( ; i < end; ) { 377 | let length = parseInt(buffer[i++]); 378 | 379 | if (length === 0) { 380 | break; 381 | } 382 | 383 | name += "\""; 384 | 385 | for (let j = 0; j < length; j++) { 386 | const character = String.fromCharCode( buffer[i] ); 387 | // We've found a null character, so probably the length is wrong. 388 | // Rather than try to guess, just bail out. 
389 | name += character; 390 | i++; 391 | } 392 | 393 | name += "\""; 394 | if (i < end) { 395 | name += " "; 396 | } 397 | } 398 | 399 | i += 2; 400 | 401 | data = name; 402 | 403 | break; 404 | } 405 | default: { 406 | data = buffer.slice(i, i+l); 407 | } 408 | break; 409 | } 410 | 411 | return data.toString(); 412 | } 413 | 414 | function parse_rr(buffer, i) 415 | { 416 | // We definitely can't seek beyond the end of the buffer 417 | if (i > buffer.length) { 418 | return -1; 419 | } 420 | 421 | const out = parse_name(buffer, i); 422 | if (out === -1) { 423 | return -1; 424 | } 425 | 426 | [rname, i] = out; 427 | 428 | const rtype = map_rrtype(parseInt( (buffer[i] << 8) | buffer[i+1])); 429 | i += 2; 430 | 431 | const rclass = map_rrclass(parseInt((buffer[i] << 8) | buffer[i+1])); 432 | i += 2; 433 | const ttl = parseInt((buffer[i] << 24) | (buffer[i+1] << 16) | (buffer[i+2] << 8) | buffer[i+3]); 434 | i += 4; 435 | 436 | const l = parseInt((buffer[i] << 8) | buffer[i+1]); 437 | i += 2; 438 | 439 | data = parse_record_data(buffer, rtype, rclass, i, l); 440 | if (data === -1) { 441 | // there was a parse error 442 | return -1; 443 | } 444 | if (rtype ==='OPT') 445 | { 446 | rname = option_type[buffer[i+1]] || "." 
447 | } 448 | 449 | i += l; 450 | 451 | return [rtype, rclass, rname, ttl, data, i] 452 | } 453 | 454 | function parse_rr_set(buffer, section, record_count, i, output) 455 | { 456 | for (let count = 0; count < record_count; count++) { 457 | let rrtype = -1; 458 | let rrclass = -1; 459 | let rrttl = -1; 460 | let rrname = ""; 461 | let rrdata = []; 462 | parsed = parse_rr(buffer, i); 463 | if (parsed === -1) { 464 | return -1; 465 | } 466 | 467 | [rrtype, rrclass, rrname, rrttl, rrdata, i] = parsed; 468 | 469 | output.push( {"section": section, "type": rrtype, "class": rrclass, "ttl": rrttl, "name": rrname, "data": rrdata} ); 470 | } 471 | 472 | return i; 473 | } 474 | 475 | 476 | const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='; 477 | 478 | function atob(input) 479 | { 480 | let str = input.replace(/=+$/, ''); 481 | let output = ''; 482 | 483 | if (str.length % 4 === 1) { 484 | throw new Error("'atob' failed: The string to be decoded is not correctly encoded."); 485 | } 486 | for (let bc = 0, bs = 0, buffer, i = 0; 487 | buffer = str.charAt(i++); 488 | ~buffer && (bs = bc % 4 ? bs * 64 + buffer : buffer, bc++ % 4) ? output += String.fromCharCode(255 & bs >> (-2 * bc & 6)) : 0) { 489 | buffer = chars.indexOf(buffer); 490 | } 491 | 492 | let i = [] 493 | for (j = 0 ; j < output.length; j++) { 494 | i.push(output.charCodeAt(j)); 495 | } 496 | 497 | return i; 498 | } 499 | 500 | 501 | 502 | function parse_wire_message(buffer) 503 | { 504 | if (buffer === null) { 505 | return {'type':0, 'class':0, 'name':"", 'error':"null buffer"}; 506 | } 507 | 508 | buffer = atob(buffer); 509 | 510 | if (buffer.length < 12) { 511 | return {'type':0, 'class':0, 'name':"", 'error':"short buffer"}; 512 | } 513 | 514 | 515 | /* https://tools.ietf.org/html/rfc1035 516 | * 517 | * 4.1.1. 
Header section format 518 | * 519 | * The header contains the following fields: 520 | * 521 | * 1 1 1 1 1 1 522 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 523 | * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 524 | * | ID | 525 | * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 526 | * |QR| Opcode |AA|TC|RD|RA| Z | RCODE | 527 | * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 528 | * | QDCOUNT | 529 | * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 530 | * | ANCOUNT | 531 | * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 532 | * | NSCOUNT | 533 | * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 534 | * | ARCOUNT | 535 | * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 536 | */ 537 | 538 | const id = parseInt((buffer[0] << 8) | buffer[1]); 539 | 540 | const flags_field = parseInt((buffer[2] << 8) | buffer[3]); 541 | const flag_query_val = parseInt(flags_field & 0x8000); 542 | const flag_opcode_val = parseInt((flags_field & 0x7800) >> 11); 543 | const flag_authoritative_val = parseInt(flags_field & 0x0400); 544 | const flag_truncated_val = parseInt(flags_field & 0x0200); 545 | const flag_recursion_desired_val = parseInt(flags_field & 0x0100); 546 | const flag_recursion_avail_val = parseInt(flags_field & 0x0080); 547 | const flag_z_val = parseInt(flags_field & 0x0040); 548 | const flag_authentic_data_val = parseInt(flags_field & 0x0020); 549 | const flag_checking_disabled_val = parseInt(flags_field & 0x0010); 550 | const flag_rcode_val = parseInt(flags_field & 0x000f); 551 | 552 | const flag_query = (flag_query_val === 0) ? true : false; 553 | const flag_authoritative = (!flag_query ? ((flag_authoritative_val === 0) ? false : true) : null); 554 | const flag_truncated = (flag_truncated_val === 0) ? false : true; 555 | const flag_recursion_desired = (flag_recursion_desired_val === 0) ? false : true; 556 | const flag_recursion_avail = (flag_recursion_avail_val === 0) ? 
false : true; 557 | const flag_z = flag_z_val; 558 | const flag_authentic_data = (flag_authentic_data_val === 0) ? false : true; 559 | const flag_checking_disabled = (flag_checking_disabled_val === 0) ? false : true; 560 | const flag_opcode = (flag_query ? dns_opcodes[flag_opcode_val] || "Unassigned" : null); 561 | const flag_rcode = (!flag_query ? dns_rcodes[flag_rcode_val] || "Unassigned/Reserved" : null); 562 | 563 | const flags = {"query": flag_query, 564 | "opcode": flag_opcode, 565 | "authoritative_answer": flag_authoritative, 566 | "truncated": flag_truncated, 567 | "recursion_desired": flag_recursion_desired, 568 | "recursion_available": flag_recursion_avail, 569 | "z": flag_z, 570 | "authentic_data": flag_authentic_data, 571 | "checking_disabled": flag_checking_disabled, 572 | "rcode": flag_rcode}; 573 | 574 | 575 | 576 | const qdcount = parseInt((buffer[4] << 8) | buffer[5]); 577 | const ancount = parseInt((buffer[6] << 8) | buffer[7]); 578 | const aucount = parseInt((buffer[8] << 8) | buffer[9]); 579 | const adcount = parseInt((buffer[10] << 8) | buffer[11]); 580 | let output = []; 581 | 582 | let bail_out = false; 583 | 584 | let i = 12; 585 | for (let count = 0; count < qdcount && bail_out === false; count++) { 586 | let qtype = -1; 587 | let qclass = -1; 588 | let qdata = ""; 589 | out = parse_question(buffer, i); 590 | if (out === -1) { 591 | error = {"error": "parse error in question section"}; 592 | output.push(error); 593 | bail_out = true; 594 | i = -1; 595 | } 596 | else { 597 | [qtype, qclass, qdata, i] = out; 598 | output.push( {"section": "query", "type": qtype, "class": qclass, "name": qdata} ); 599 | } 600 | } 601 | 602 | 603 | if (bail_out === false) { 604 | i = parse_rr_set(buffer, "answer", ancount, i, output); 605 | if (i === -1) { 606 | error = {"error": "parse error in answer section"}; 607 | output.push(error); 608 | bail_out = true; 609 | } 610 | } 611 | if (bail_out === false) { 612 | i = parse_rr_set(buffer, "authority", aucount, 
i, output); 613 | if (i === -1) { 614 | error = {"error": "parse error in auth section"}; 615 | output.push(error); 616 | bail_out = true; 617 | } 618 | } 619 | if (bail_out === false) { 620 | i = parse_rr_set(buffer, "additional", adcount, i, output); 621 | if (i === -1) { 622 | error = {"error": "parse error in additional section"}; 623 | output.push(error); 624 | bail_out = true; 625 | } 626 | } 627 | if (i !== -1 && buffer.length !== i) { 628 | // pack an error and some state that may help debug 629 | error = {"error": "bad buffer length. Expected:"+buffer.length+", got:"+i+". Incomplete parse?"}; 630 | output.push(error); 631 | } 632 | 633 | return { 634 | "id": id, 635 | "flags": flags, 636 | "qdcount": qdcount, 637 | "ancount": ancount, 638 | "aucount": aucount, 639 | "adcount": adcount, 640 | "payload": output 641 | }; 642 | } 643 | 644 | function parse_query(buffer) 645 | { 646 | const tmp = parse_wire_message(buffer); 647 | 648 | return { 649 | "id": tmp.id, 650 | "opcode": tmp.opcode, 651 | "rcode": tmp.rcode, 652 | "flags": tmp.flags, 653 | "qdcount": tmp.qdcount, 654 | "payload": tmp.payload 655 | }; 656 | } 657 | 658 | module = typeof(module) === 'undefined' ? 
{} : module; 659 | module.exports = parse_wire_message; 660 | -------------------------------------------------------------------------------- /tests/fixtures/withErrors.json: -------------------------------------------------------------------------------- 1 | { 2 | "NG6EAAABAAEADQAKAAAGAAEAAAYAAQABUYAAQAFhDHJvb3Qtc2VydmVycwNuZXQABW5zdGxkDHZlcmlzabdmLWdycwNjb20AeCoLNAAABwgAAAOEAAk6gAABUYAAAAIAAQAH6QAABAFrwB4AAAIAAQAH6QAABAFnwB4AAAIAAQAH6QAABAFiwB4AAAIAAQAH6QAAAsAcAAACAAEAB+kAAAQBbcAeAAACAAEAB+kAAAQBY8AeAAACAAEAB+kAAAQBZsAeAAACAAEAB+kAAAQBasAeAAACAAEAB+kAAAQBZcAeAAACAAEAB+kAAAQBZMAeAAACAAEAB+kAAAQBaMAeAAACAAEAB+l3d3d4sXeGd3d0dwgAB+kAAAQBbMAewGcAAQABADbugAAEwQAOgcBnABwAAQA27oAAECABB/0AAAAAAAAAAAAAAAHAdgABAAEANu6AAATAcCQEwIUAAQABADbugAAEwORPycCFABwAAQA27oAAECABBQAAhAAAAAAAAAAAAAvAHAABAAEANu6AAATGKQAEwBwAHAABADbugAAQIAEFA7o+AAAAAAAAAAIAMMChAAEAAQA27oAABMoMGyHAoQAcAAEANu6AABAgAQ38d3d3d3cAAAAAAAA1wLAAAQABADbugAAEwCEEDA==": { 3 | "id": 13422, 4 | "flags": { 5 | "query": false, 6 | "opcode": null, 7 | "authoritative_answer": true, 8 | "truncated": false, 9 | "recursion_desired": false, 10 | "recursion_available": false, 11 | "z": 0, 12 | "authentic_data": false, 13 | "checking_disabled": false, 14 | "rcode": "NoError" 15 | }, 16 | "qdcount": 1, 17 | "ancount": 1, 18 | "aucount": 13, 19 | "adcount": 10, 20 | "payload": [ 21 | { 22 | "section": "query", 23 | "type": "SOA", 24 | "class": "IN", 25 | "name": "." 26 | }, 27 | { 28 | "section": "answer", 29 | "type": "SOA", 30 | "class": "IN", 31 | "ttl": 86400, 32 | "name": "nstld.verisi·f-grs.com.", 33 | "data": "a.root-servers.net. nstld.verisi·f-grs.com. 2016021300 1800 900 604800 86400" 34 | }, 35 | { 36 | "section": "authority", 37 | "type": "NS", 38 | "class": "IN", 39 | "ttl": 518400, 40 | "name": ".", 41 | "data": "k.root-servers.net." 42 | }, 43 | { 44 | "section": "authority", 45 | "type": "NS", 46 | "class": "IN", 47 | "ttl": 518400, 48 | "name": ".", 49 | "data": "g.root-servers.net." 
50 | }, 51 | { 52 | "section": "authority", 53 | "type": "NS", 54 | "class": "IN", 55 | "ttl": 518400, 56 | "name": ".", 57 | "data": "b.root-servers.net." 58 | }, 59 | { 60 | "section": "authority", 61 | "type": "NS", 62 | "class": "IN", 63 | "ttl": 518400, 64 | "name": ".", 65 | "data": "a.root-servers.net." 66 | }, 67 | { 68 | "section": "authority", 69 | "type": "NS", 70 | "class": "IN", 71 | "ttl": 518400, 72 | "name": ".", 73 | "data": "m.root-servers.net." 74 | }, 75 | { 76 | "section": "authority", 77 | "type": "NS", 78 | "class": "IN", 79 | "ttl": 518400, 80 | "name": ".", 81 | "data": "c.root-servers.net." 82 | }, 83 | { 84 | "section": "authority", 85 | "type": "NS", 86 | "class": "IN", 87 | "ttl": 518400, 88 | "name": ".", 89 | "data": "f.root-servers.net." 90 | }, 91 | { 92 | "section": "authority", 93 | "type": "NS", 94 | "class": "IN", 95 | "ttl": 518400, 96 | "name": ".", 97 | "data": "j.root-servers.net." 98 | }, 99 | { 100 | "section": "authority", 101 | "type": "NS", 102 | "class": "IN", 103 | "ttl": 518400, 104 | "name": ".", 105 | "data": "e.root-servers.net." 106 | }, 107 | { 108 | "section": "authority", 109 | "type": "NS", 110 | "class": "IN", 111 | "ttl": 518400, 112 | "name": ".", 113 | "data": "d.root-servers.net." 114 | }, 115 | { 116 | "section": "authority", 117 | "type": "NS", 118 | "class": "IN", 119 | "ttl": 518400, 120 | "name": ".", 121 | "data": "h.root-servers.net." 
122 | }, 123 | { 124 | "error": "parse error in auth section" 125 | } 126 | ] 127 | }, 128 | "CcsAAAABAAAAAAABBHJpcGUDbmV0AAAcAAEAACkCAAAAgAAAAAAAAAA=": { 129 | "id": 2507, 130 | "flags": { 131 | "query": true, 132 | "opcode": "Query", 133 | "authoritative_answer": null, 134 | "truncated": false, 135 | "recursion_desired": false, 136 | "recursion_available": false, 137 | "z": 0, 138 | "authentic_data": false, 139 | "checking_disabled": false, 140 | "rcode": null 141 | }, 142 | "qdcount": 1, 143 | "ancount": 0, 144 | "aucount": 0, 145 | "adcount": 1, 146 | "payload": [ 147 | { 148 | "section": "query", 149 | "type": "AAAA", 150 | "class": "IN", 151 | "name": "ripe.net." 152 | }, 153 | { 154 | "section": "additional", 155 | "type": "OPT", 156 | "class": 512, 157 | "ttl": 32768, 158 | "name": ".", 159 | "data": "" 160 | }, 161 | { 162 | "error": "bad buffer length. Expected:41, got:37. Incomplete parse?" 163 | } 164 | ] 165 | }, 166 | "IBKEAAABAAEABgACBWJhbmtzBWF0bGFzBHJpcGUDbmV0AAAcAAHADAAcAAEAAFRgABAgAQZ8AugAEQAAAADBABMxwBgAAgABAAAOEAAMA25zMwNuaWMCZnIAwEIAAgABAAAOEAAWA3ByaQdhdXRoZG5zBHJpcGUDbmV0AMBCAAIAAQAADhAADQRzZWMxBWFwbmljwHfAQgACAAEAAA4QAAcEc2VjM8CNwEIAAgABAAAOEAAQBnNucy1wYgNpc2MDb3JnAMBCAAIAAQAADhAAEQZ0aW5uaWUEYXJpbgNuZXQAwGYAAQABAAAOEAAEwQAJBcBmABwAAQAADhAAECABBnwA4AAAAAAAAAAAAAU=": { 167 | "id": 8210, 168 | "flags": { 169 | "query": false, 170 | "opcode": null, 171 | "authoritative_answer": true, 172 | "truncated": false, 173 | "recursion_desired": false, 174 | "recursion_available": false, 175 | "z": 0, 176 | "authentic_data": false, 177 | "checking_disabled": false, 178 | "rcode": "NoError" 179 | }, 180 | "qdcount": 1, 181 | "ancount": 1, 182 | "aucount": 6, 183 | "adcount": 2, 184 | "payload": [ 185 | { 186 | "section": "query", 187 | "type": "AAAA", 188 | "class": "IN", 189 | "name": "banks.atlas.ripe.net." 
190 | }, 191 | { 192 | "section": "answer", 193 | "type": "AAAA", 194 | "class": "IN", 195 | "ttl": 21600, 196 | "name": "banks.atlas.ripe.net.", 197 | "data": "2001:067c:02e8:0011:0000:0000:c100:1331" 198 | }, 199 | { 200 | "section": "authority", 201 | "type": "NS", 202 | "class": "IN", 203 | "ttl": 3600, 204 | "name": "ripe.net.", 205 | "data": "ns3.nic.fr." 206 | }, 207 | { 208 | "error": "parse error in auth section" 209 | } 210 | ] 211 | }, 212 | "KMiBgAABAAEADQAQA3d3dwRyaXBlA25ldAAAAQABwAwAAQABAAAhYgAEwQAGi8AVAAIAAQACWfMAEQFmDGd0bGQtc2VydmVyc8AVwBUAAgABAAJZ8wAEAW3APMAVAAIAAQACWfMABAFswDzAFQACAAEAAlnzAAQBacA8wBUAAgABAAJZ8wAEAWHAPMAVAAIAAQACWfMABAFiwDzAFQACAAEAAlnzAAQBa8A8wBUAAgABAAJZ8wAEAWjAPMAVAAIAAQACWfMABAFjwDzAFQACAAEAAlnzAAQBZ8A8wBUAAgABAAJZ8wAEAWrAPMAVAAIAAQACWfMABAFkwDzAFQACAAEAAlnzAAQBZcA8wIcAAQABAAFKdAAEwAUGHsCHABwAAQABSnQAECABBQOoPgAAAAAAAAACADDAlwABAAEAAX6YAATAIQ4ewJcAHAABAAF+mAAQIAEFAyMdAAAAAAAAAAIAMMDHAAEAAQABiQMABMAaXB7A9wABAAEAAYbaAATAH1AewQcAAQABAAFKyQAEwAxeHsA6AAEAAQABSskABMAjMx7A1wABAAEAAXdwAATAKl0ewLcAAQABAAFjZgAEwDZwHsB3AAEAAQACaqkABMArrB7A5wABAAEAAeXzAATAME8ewKcAAQABAAFxiAAEwDSyHsBnAAEAAQABSnQABMA=": { 213 | "id": 10440, 214 | "flags": { 215 | "query": false, 216 | "opcode": null, 217 | "authoritative_answer": false, 218 | "truncated": false, 219 | "recursion_desired": true, 220 | "recursion_available": true, 221 | "z": 0, 222 | "authentic_data": false, 223 | "checking_disabled": false, 224 | "rcode": "NoError" 225 | }, 226 | "qdcount": 1, 227 | "ancount": 1, 228 | "aucount": 13, 229 | "adcount": 16, 230 | "payload": [ 231 | { 232 | "section": "query", 233 | "type": "A", 234 | "class": "IN", 235 | "name": "www.ripe.net." 
236 | }, 237 | { 238 | "section": "answer", 239 | "type": "A", 240 | "class": "IN", 241 | "ttl": 8546, 242 | "name": "www.ripe.net.", 243 | "data": "193.0.6.139" 244 | }, 245 | { 246 | "section": "authority", 247 | "type": "NS", 248 | "class": "IN", 249 | "ttl": 154099, 250 | "name": "net.", 251 | "data": "f.gtld-servers.net." 252 | }, 253 | { 254 | "section": "authority", 255 | "type": "NS", 256 | "class": "IN", 257 | "ttl": 154099, 258 | "name": "net.", 259 | "data": "m.gtld-servers.net." 260 | }, 261 | { 262 | "section": "authority", 263 | "type": "NS", 264 | "class": "IN", 265 | "ttl": 154099, 266 | "name": "net.", 267 | "data": "l.gtld-servers.net." 268 | }, 269 | { 270 | "section": "authority", 271 | "type": "NS", 272 | "class": "IN", 273 | "ttl": 154099, 274 | "name": "net.", 275 | "data": "i.gtld-servers.net." 276 | }, 277 | { 278 | "section": "authority", 279 | "type": "NS", 280 | "class": "IN", 281 | "ttl": 154099, 282 | "name": "net.", 283 | "data": "a.gtld-servers.net." 284 | }, 285 | { 286 | "section": "authority", 287 | "type": "NS", 288 | "class": "IN", 289 | "ttl": 154099, 290 | "name": "net.", 291 | "data": "b.gtld-servers.net." 292 | }, 293 | { 294 | "section": "authority", 295 | "type": "NS", 296 | "class": "IN", 297 | "ttl": 154099, 298 | "name": "net.", 299 | "data": "k.gtld-servers.net." 300 | }, 301 | { 302 | "section": "authority", 303 | "type": "NS", 304 | "class": "IN", 305 | "ttl": 154099, 306 | "name": "net.", 307 | "data": "h.gtld-servers.net." 308 | }, 309 | { 310 | "section": "authority", 311 | "type": "NS", 312 | "class": "IN", 313 | "ttl": 154099, 314 | "name": "net.", 315 | "data": "c.gtld-servers.net." 316 | }, 317 | { 318 | "section": "authority", 319 | "type": "NS", 320 | "class": "IN", 321 | "ttl": 154099, 322 | "name": "net.", 323 | "data": "g.gtld-servers.net." 324 | }, 325 | { 326 | "section": "authority", 327 | "type": "NS", 328 | "class": "IN", 329 | "ttl": 154099, 330 | "name": "net.", 331 | "data": "j.gtld-servers.net." 
332 | }, 333 | { 334 | "section": "authority", 335 | "type": "NS", 336 | "class": "IN", 337 | "ttl": 154099, 338 | "name": "net.", 339 | "data": "d.gtld-servers.net." 340 | }, 341 | { 342 | "section": "authority", 343 | "type": "NS", 344 | "class": "IN", 345 | "ttl": 154099, 346 | "name": "net.", 347 | "data": "e.gtld-servers.net." 348 | }, 349 | { 350 | "section": "additional", 351 | "type": "A", 352 | "class": "IN", 353 | "ttl": 84596, 354 | "name": "a.gtld-servers.net.", 355 | "data": "192.5.6.30" 356 | }, 357 | { 358 | "section": "additional", 359 | "type": "AAAA", 360 | "class": "IN", 361 | "ttl": 84596, 362 | "name": "a.gtld-servers.net.", 363 | "data": "2001:0503:a83e:0000:0000:0000:0002:0030" 364 | }, 365 | { 366 | "section": "additional", 367 | "type": "A", 368 | "class": "IN", 369 | "ttl": 97944, 370 | "name": "b.gtld-servers.net.", 371 | "data": "192.33.14.30" 372 | }, 373 | { 374 | "section": "additional", 375 | "type": "AAAA", 376 | "class": "IN", 377 | "ttl": 97944, 378 | "name": "b.gtld-servers.net.", 379 | "data": "2001:0503:231d:0000:0000:0000:0002:0030" 380 | }, 381 | { 382 | "section": "additional", 383 | "type": "A", 384 | "class": "IN", 385 | "ttl": 100611, 386 | "name": "c.gtld-servers.net.", 387 | "data": "192.26.92.30" 388 | }, 389 | { 390 | "section": "additional", 391 | "type": "A", 392 | "class": "IN", 393 | "ttl": 100058, 394 | "name": "d.gtld-servers.net.", 395 | "data": "192.31.80.30" 396 | }, 397 | { 398 | "section": "additional", 399 | "type": "A", 400 | "class": "IN", 401 | "ttl": 84681, 402 | "name": "e.gtld-servers.net.", 403 | "data": "192.12.94.30" 404 | }, 405 | { 406 | "section": "additional", 407 | "type": "A", 408 | "class": "IN", 409 | "ttl": 84681, 410 | "name": "f.gtld-servers.net.", 411 | "data": "192.35.51.30" 412 | }, 413 | { 414 | "section": "additional", 415 | "type": "A", 416 | "class": "IN", 417 | "ttl": 96112, 418 | "name": "g.gtld-servers.net.", 419 | "data": "192.42.93.30" 420 | }, 421 | { 422 | "section": 
"additional", 423 | "type": "A", 424 | "class": "IN", 425 | "ttl": 90982, 426 | "name": "h.gtld-servers.net.", 427 | "data": "192.54.112.30" 428 | }, 429 | { 430 | "section": "additional", 431 | "type": "A", 432 | "class": "IN", 433 | "ttl": 158377, 434 | "name": "i.gtld-servers.net.", 435 | "data": "192.43.172.30" 436 | }, 437 | { 438 | "section": "additional", 439 | "type": "A", 440 | "class": "IN", 441 | "ttl": 124403, 442 | "name": "j.gtld-servers.net.", 443 | "data": "192.48.79.30" 444 | }, 445 | { 446 | "section": "additional", 447 | "type": "A", 448 | "class": "IN", 449 | "ttl": 94600, 450 | "name": "k.gtld-servers.net.", 451 | "data": "192.52.178.30" 452 | }, 453 | { 454 | "error": "parse error in additional section" 455 | } 456 | ] 457 | }, 458 | "KOcAAAABAAAAAAABBnN0ZXJlbwJocQZwaGljb2gDbmV0AAAQAAEAACkCAAAAAAAABAAIAAQAAQAA": { 459 | "id": 10471, 460 | "flags": { 461 | "query": true, 462 | "opcode": "Query", 463 | "authoritative_answer": null, 464 | "truncated": false, 465 | "recursion_desired": false, 466 | "recursion_available": false, 467 | "z": 0, 468 | "authentic_data": false, 469 | "checking_disabled": false, 470 | "rcode": null 471 | }, 472 | "qdcount": 1, 473 | "ancount": 0, 474 | "aucount": 0, 475 | "adcount": 1, 476 | "payload": [ 477 | { 478 | "section": "query", 479 | "type": "TXT", 480 | "class": "IN", 481 | "name": "stereo.hq.phicoh.net." 482 | }, 483 | { 484 | "section": "additional", 485 | "type": "OPT", 486 | "class": 512, 487 | "ttl": 0, 488 | "name": ".", 489 | "data": "0,8,0,4" 490 | }, 491 | { 492 | "error": "bad buffer length. Expected:57, got:53. Incomplete parse?" 
493 | } 494 | ] 495 | }, 496 | "ML+BgAABAAEAAAABA3d3dwRyaXBlA25ldAAAAQABwAwAAQABAAAA7gAEwQAGi8O7/Ndt9Mb/7dvz92f79w==": { 497 | "id": 12479, 498 | "flags": { 499 | "query": false, 500 | "opcode": null, 501 | "authoritative_answer": false, 502 | "truncated": false, 503 | "recursion_desired": true, 504 | "recursion_available": true, 505 | "z": 0, 506 | "authentic_data": false, 507 | "checking_disabled": false, 508 | "rcode": "NoError" 509 | }, 510 | "qdcount": 1, 511 | "ancount": 1, 512 | "aucount": 0, 513 | "adcount": 1, 514 | "payload": [ 515 | { 516 | "section": "query", 517 | "type": "A", 518 | "class": "IN", 519 | "name": "www.ripe.net." 520 | }, 521 | { 522 | "section": "answer", 523 | "type": "A", 524 | "class": "IN", 525 | "ttl": 238, 526 | "name": "www.ripe.net.", 527 | "data": "193.0.6.139" 528 | }, 529 | { 530 | "error": "parse error in additional section" 531 | } 532 | ] 533 | }, 534 | "VkEBAAABAAAAAAABA3d3dwRyaXBlA25ldAAAAQABAAApAgAAAIAAAAAAAAAA": { 535 | "id": 22081, 536 | "flags": { 537 | "query": true, 538 | "opcode": "Query", 539 | "authoritative_answer": null, 540 | "truncated": false, 541 | "recursion_desired": true, 542 | "recursion_available": false, 543 | "z": 0, 544 | "authentic_data": false, 545 | "checking_disabled": false, 546 | "rcode": null 547 | }, 548 | "qdcount": 1, 549 | "ancount": 0, 550 | "aucount": 0, 551 | "adcount": 1, 552 | "payload": [ 553 | { 554 | "section": "query", 555 | "type": "A", 556 | "class": "IN", 557 | "name": "www.ripe.net." 558 | }, 559 | { 560 | "section": "additional", 561 | "type": "OPT", 562 | "class": 512, 563 | "ttl": 32768, 564 | "name": ".", 565 | "data": "" 566 | }, 567 | { 568 | "error": "bad buffer length. Expected:45, got:41. Incomplete parse?" 
569 | } 570 | ] 571 | }, 572 | "WC+BoAABAAIABwAFA3d3dwRyaXBlA25ldAAAAQABwAwAAQABAABRzQAEwQAGi8AMAC4AAQAAUc0AnAABBQMAAFRgU8jUG1OhOQstQwRyaXBlA25ldAAjuVBkX2JrdfzcpNjTX+iuHepGoBLQFh6jMHtJn+O5KTRF95OeBaggiQvO9sFXXU5Guav3cWQfnE+s+1P1Z/hzRACplE35HLI/wrklowjhMZP1xzaYBLkZ/x6WDZz98jYu8gyNVcqmTExwSH9tKtukGJLy2NHzPanrQJY6KvSwTcAQAAIAAQAAC30ADQRzZWMzBWFwbmljwBXAEAACAAEAAAt9AA4DcHJpB2F1dGhkbnPAEMAQAAIAAQAAC30ADgZ0aW5uaWUEYXJpbsAVwBAAAgABAAALfQAQBnNucy1wYgNpc2MDb3JnAMAQAAIAAQAAC30ABwRzZWMxwOfAEAACAAEAAAt9AAwDbnMzA25pYwJmcgDAEAAuAAEAAAt9AJwAAgUCAAAOEFPI1BtToTkLLUMEcmlwZQNuZXQAC4WMH46cBWT/hhWvVrStIdXrqHA2fwfphGkx9+6wbss+mHg8mbfKvaFfcg43/MZh/PwdyAQkRN8I+v/OZ1JA3Gt3KvDc00PebtQZBYlXxssZVNtcx45DG5a3M/RGzhqjM5hfuigLmghIEhuvMhtrhmC4WS/7B3KrYOenFQUJmxnA+wABAAEAAAaYAATBAAkFwPsAHAABAAAGmAAQIAEGfADgAAAAAAAAAAAABcD7": { 573 | "id": 22575, 574 | "flags": { 575 | "query": false, 576 | "opcode": null, 577 | "authoritative_answer": false, 578 | "truncated": false, 579 | "recursion_desired": true, 580 | "recursion_available": true, 581 | "z": 0, 582 | "authentic_data": true, 583 | "checking_disabled": false, 584 | "rcode": "NoError" 585 | }, 586 | "qdcount": 1, 587 | "ancount": 2, 588 | "aucount": 7, 589 | "adcount": 5, 590 | "payload": [ 591 | { 592 | "section": "query", 593 | "type": "A", 594 | "class": "IN", 595 | "name": "www.ripe.net." 
596 | }, 597 | { 598 | "section": "answer", 599 | "type": "A", 600 | "class": "IN", 601 | "ttl": 20941, 602 | "name": "www.ripe.net.", 603 | "data": "193.0.6.139" 604 | }, 605 | { 606 | "section": "answer", 607 | "type": "RRSIG", 608 | "class": "IN", 609 | "ttl": 20941, 610 | "name": "www.ripe.net.", 611 | "data": "0,1,5,3,0,0,84,96,83,200,212,27,83,161,57,11,45,67,4,114,105,112,101,3,110,101,116,0,35,185,80,100,95,98,107,117,252,220,164,216,211,95,232,174,29,234,70,160,18,208,22,30,163,48,123,73,159,227,185,41,52,69,247,147,158,5,168,32,137,11,206,246,193,87,93,78,70,185,171,247,113,100,31,156,79,172,251,83,245,103,248,115,68,0,169,148,77,249,28,178,63,194,185,37,163,8,225,49,147,245,199,54,152,4,185,25,255,30,150,13,156,253,242,54,46,242,12,141,85,202,166,76,76,112,72,127,109,42,219,164,24,146,242,216,209,243,61,169,235,64,150,58,42,244,176,77" 612 | }, 613 | { 614 | "section": "authority", 615 | "type": "NS", 616 | "class": "IN", 617 | "ttl": 2941, 618 | "name": "ripe.net.", 619 | "data": "sec3.apnic.net." 620 | }, 621 | { 622 | "section": "authority", 623 | "type": "NS", 624 | "class": "IN", 625 | "ttl": 2941, 626 | "name": "ripe.net.", 627 | "data": "pri.authdns.ripe.net." 628 | }, 629 | { 630 | "section": "authority", 631 | "type": "NS", 632 | "class": "IN", 633 | "ttl": 2941, 634 | "name": "ripe.net.", 635 | "data": "tinnie.arin.net." 636 | }, 637 | { 638 | "section": "authority", 639 | "type": "NS", 640 | "class": "IN", 641 | "ttl": 2941, 642 | "name": "ripe.net.", 643 | "data": "sns-pb.isc.org." 644 | }, 645 | { 646 | "section": "authority", 647 | "type": "NS", 648 | "class": "IN", 649 | "ttl": 2941, 650 | "name": "ripe.net.", 651 | "data": "sec1.apnic.net." 652 | }, 653 | { 654 | "section": "authority", 655 | "type": "NS", 656 | "class": "IN", 657 | "ttl": 2941, 658 | "name": "ripe.net.", 659 | "data": "ns3.nic.fr." 
660 | }, 661 | { 662 | "section": "authority", 663 | "type": "RRSIG", 664 | "class": "IN", 665 | "ttl": 2941, 666 | "name": "ripe.net.", 667 | "data": "0,2,5,2,0,0,14,16,83,200,212,27,83,161,57,11,45,67,4,114,105,112,101,3,110,101,116,0,11,133,140,31,142,156,5,100,255,134,21,175,86,180,173,33,213,235,168,112,54,127,7,233,132,105,49,247,238,176,110,203,62,152,120,60,153,183,202,189,161,95,114,14,55,252,198,97,252,252,29,200,4,36,68,223,8,250,255,206,103,82,64,220,107,119,42,240,220,211,67,222,110,212,25,5,137,87,198,203,25,84,219,92,199,142,67,27,150,183,51,244,70,206,26,163,51,152,95,186,40,11,154,8,72,18,27,175,50,27,107,134,96,184,89,47,251,7,114,171,96,231,167,21,5,9,155,25" 668 | }, 669 | { 670 | "section": "additional", 671 | "type": "A", 672 | "class": "IN", 673 | "ttl": 1688, 674 | "name": "pri.authdns.ripe.net.", 675 | "data": "193.0.9.5" 676 | }, 677 | { 678 | "section": "additional", 679 | "type": "AAAA", 680 | "class": "IN", 681 | "ttl": 1688, 682 | "name": "pri.authdns.ripe.net.", 683 | "data": "2001:067c:00e0:0000:0000:0000:0000:0005" 684 | }, 685 | { 686 | "section": "additional", 687 | "type": 0, 688 | "class": 0, 689 | "ttl": 0, 690 | "name": "pri.authdns.ripe.net.", 691 | "data": "" 692 | }, 693 | { 694 | "error": "parse error in additional section" 695 | } 696 | ] 697 | }, 698 | "sPIAAgdkcmFniW5zBFppb3kCZXUAABAAAQdkcmFnb24tBGFpb3kCZXUAABAAAQAAgRAATyVIZXJlIGJlIP8gZHJhZ29ucyQgV2l0aCBcIGFuZCAZIGFuZCB/CGFzIHcA/2wuH1Rocm93aW5nIGluIIAgZm9yIGdvO2SgbWVhc3VyZS7AKQAGAAEAAA4QAAYDbnMxQCnAKQACAAEAAA4RAAYDbnMywCnAlwABAAEAAA4QAAQAECABCIgQRAAQAqDJ//4=": { 699 | "id": 45298, 700 | "flags": { 701 | "query": true, 702 | "opcode": "Query", 703 | "authoritative_answer": null, 704 | "truncated": false, 705 | "recursion_desired": false, 706 | "recursion_available": false, 707 | "z": 0, 708 | "authentic_data": false, 709 | "checking_disabled": false, 710 | "rcode": null 711 | }, 712 | "qdcount": 1892, 713 | "ancount": 29281, 714 | "aucount": 26505, 715 | "adcount": 
28275, 716 | "payload": [ 717 | { 718 | "section": "query", 719 | "type": "TXT", 720 | "class": "IN", 721 | "name": "Zioy.eu." 722 | }, 723 | { 724 | "section": "query", 725 | "type": "TXT", 726 | "class": "IN", 727 | "name": "dragon-.aioy.eu." 728 | }, 729 | { 730 | "section": "query", 731 | "type": 129, 732 | "class": 4096, 733 | "name": "." 734 | }, 735 | { 736 | "error": "parse error in question section" 737 | } 738 | ] 739 | }, 740 | "sPJ6AAABACEAAsICB1VyWGdviFEEYf//AAAQAAEPZHJhSm9uA/R/aW95AmV1AAAGAAEAAHv/VHUAABAAAQ9kcmFKAAAGAAEAAHv/9H9pb3lvbgP0f2lveQJldQAABgABAAAOcABPJUhlcuUgYmUgcwR/cyEGV2l0aENcP2HiAAJgIGFpbwJldQAABgABAAB7/1R1AAAQAAEPZHJhSm9fAmV1AQAQf2lveQJldQAABgABAAAOEAAAHAD///+AfwBpV2cgaW4ggCAAAnIgZ29vZP9tZWFzdXJlLsApDwIBAMApAAIAAQAVDjAABgNuczLAL8CXAAEAAQAADhD5lYAAAABzMsAvwJcAAQABAAAOEPmVEJcAAQAJGwAOEAAEEEQA7wGgyf/+": { 741 | "id": 45298, 742 | "flags": { 743 | "query": true, 744 | "opcode": "Unassigned", 745 | "authoritative_answer": null, 746 | "truncated": true, 747 | "recursion_desired": false, 748 | "recursion_available": false, 749 | "z": 0, 750 | "authentic_data": false, 751 | "checking_disabled": false, 752 | "rcode": null 753 | }, 754 | "qdcount": 1, 755 | "ancount": 33, 756 | "aucount": 2, 757 | "adcount": 49666, 758 | "payload": [ 759 | { 760 | "error": "parse error in question section" 761 | } 762 | ] 763 | }, 764 | "sPJh////AAH+/voHZHIEYWJvcgR/Ym95AmVcfC7AczHAKcAKAAD+/jHAKcAKAAD+/v7+AN7e3t7e0t7eAAEAAAIQfwBuJQ8jwC7AKQACAAEAAAMQ/+RiZSD/IGQAAxaHABAgAQiI3s7e3t4FRAAQAqDJ//4=": { 765 | "id": 45298, 766 | "flags": { 767 | "query": true, 768 | "opcode": "Unassigned", 769 | "authoritative_answer": null, 770 | "truncated": false, 771 | "recursion_desired": true, 772 | "recursion_available": true, 773 | "z": 64, 774 | "authentic_data": true, 775 | "checking_disabled": true, 776 | "rcode": null 777 | }, 778 | "qdcount": 65535, 779 | "ancount": 1, 780 | "aucount": 65278, 781 | "adcount": 64007, 782 | "payload": [ 783 | { 784 | "error": "parse error in 
question section" 785 | } 786 | ] 787 | }, 788 | "sPKAgAABAAYAAAAAwAwAAgABY8ARwAwA6P//fwAKrgAHBHNlYzPAK8AMAAIAAQAACq4ADgN9cmkHYXV0aGRuc8AMwAz4AQABAAAKrgAMA25zMwNuaWMCZnIAwAwAAgABAAAKrgAfBnNubS1wYgNpc2MDb3JnAMAMAAIAAQAACq4ADgZ0aW5uaWUE": { 789 | "id": 45298, 790 | "flags": { 791 | "query": false, 792 | "opcode": null, 793 | "authoritative_answer": false, 794 | "truncated": false, 795 | "recursion_desired": false, 796 | "recursion_available": true, 797 | "z": 0, 798 | "authentic_data": false, 799 | "checking_disabled": false, 800 | "rcode": "NoError" 801 | }, 802 | "qdcount": 1, 803 | "ancount": 6, 804 | "aucount": 0, 805 | "adcount": 0, 806 | "payload": [ 807 | { 808 | "error": "parse error in question section" 809 | } 810 | ] 811 | }, 812 | "sPKD/+cBd2luZyBpZIByYWdvbnMEYWlveQJldQAAEAABB2RyYQNuczHAKcApAAIAAQAADhAABgNuczLAKcCXAAEAAWdvbnMEYWlveQJldQBPJUhlcmUgYmUg/yBkcmFnb25zISBXaXRoIFwgYW5kICIgYW5kIH8IYXMgd2VsbC4fVGhyb3dpbmcgaW4gH1RocmwuH1Rocm93aW5nIGluIB9UaHJvd2luZyBpbiCAb3dpbmcgaW4ggCBmb4AgZm9yIGdvb2QgbWVhc3VyZS7AKQACAAEAAA4QAAYDbnMxwCnAKQACAAEAAA4QAAYDbnMywCnAlwABAAEAAA4QAASCJQ8jwKkAHAABAAAOEAAQIAEIiBBEABACoMn//g==": { 813 | "id": 45298, 814 | "flags": { 815 | "query": false, 816 | "opcode": null, 817 | "authoritative_answer": false, 818 | "truncated": true, 819 | "recursion_desired": true, 820 | "recursion_available": true, 821 | "z": 64, 822 | "authentic_data": true, 823 | "checking_disabled": true, 824 | "rcode": "Unassigned/Reserved" 825 | }, 826 | "qdcount": 59137, 827 | "ancount": 30569, 828 | "aucount": 28263, 829 | "adcount": 8297, 830 | "payload": [ 831 | { 832 | "error": "parse error in question section" 833 | } 834 | ] 835 | }, 836 | "sPKEAAABAAEAAgACB2RyYWdvbnMEYWlveQJldQAAEAABB2RyYWdvbnMEYWlveRJldQAAEAABAAAOEABPJUhlcmUgYmUg/yBkcmFnb25zISBXaXRoIFwgYW5kICIgYW5kIH8IYXMgd2VsbC4fVGhyb3dpbmcgaW4ggCBmb3IgZ29vZCBtZWFzdXJlLsApAAIAAQAADhAABgNuczHAKcApAAIAAQAADhAABgNuczLAKcCXAAEAAQAADhAABIIlDyPAqQAcAAEAAA4QABAgAQiIEEQAEAKgyf/+": { 837 | "id": 45298, 838 | 
"flags": { 839 | "query": false, 840 | "opcode": null, 841 | "authoritative_answer": true, 842 | "truncated": false, 843 | "recursion_desired": false, 844 | "recursion_available": false, 845 | "z": 0, 846 | "authentic_data": false, 847 | "checking_disabled": false, 848 | "rcode": "NoError" 849 | }, 850 | "qdcount": 1, 851 | "ancount": 1, 852 | "aucount": 2, 853 | "adcount": 2, 854 | "payload": [ 855 | { 856 | "section": "query", 857 | "type": "TXT", 858 | "class": "IN", 859 | "name": "dragons.aioy.eu." 860 | }, 861 | { 862 | "error": "parse error in answer section" 863 | } 864 | ] 865 | }, 866 | "sPKEAAABAAEAAgACB2RyYX5vbnPr6+vr6+vr6+vr6+vr62RyYWdvbnMEYWlveQJldQAAEAABAAAGAyBcIGFuZOgDAABugYGBgYGBgYGBgYGBgYGBgYF5gYGBgYGBYgB/CGEABgMgXCBhbmToAxQAbmQgfwhhcyB3ZWxsLh8AABAAAQAABgMgXCBhbmToAyBnUGV1AAAQAADnZHIBZ29ucwRhaW95AmV1AAAQAAEAAA4Ab2QgbQMgXCBhbmToAwAA/wEGAA4QAG5zMcApwCkAAhAgAQiI/0QAAQAADuEABgNuczLAKcCXABwAAQAADhAABIIlDyPfqQAcAAD9AA4QABAgAQiI/0QAEAKgyf/+": { 867 | "id": 45298, 868 | "flags": { 869 | "query": false, 870 | "opcode": null, 871 | "authoritative_answer": true, 872 | "truncated": false, 873 | "recursion_desired": false, 874 | "recursion_available": false, 875 | "z": 0, 876 | "authentic_data": false, 877 | "checking_disabled": false, 878 | "rcode": "NoError" 879 | }, 880 | "qdcount": 1, 881 | "ancount": 1, 882 | "aucount": 2, 883 | "adcount": 2, 884 | "payload": [ 885 | { 886 | "error": "parse error in question section" 887 | } 888 | ] 889 | }, 890 | "sPKEAAABHgEQAgACBwFyYWdvbnMEYWlveQJlcQAAEAABB2RyYWdv1tbW1tbW1tbW1tZucwAA//95AmV1AAAwAAEAAA4QAE8lSF5yZSBiZSD/IGRyYTFvbnMhIFdpdGggXCBhbmQgIiBhbmQgfwhhcyB3ZWxsLh86aHJvd2luACBpbiCAIGZvciBnb4AAIG1lYXN3cmUuwCkAAgABAAAOEAAGA25zMeApwCkAAgABAAAOEAAGA25zMsApJQ8jwJoAHAABAAAOEAAQIAEIiBBEABACoMn//g==": { 891 | "id": 45298, 892 | "flags": { 893 | "query": false, 894 | "opcode": null, 895 | "authoritative_answer": true, 896 | "truncated": false, 897 | "recursion_desired": false, 898 | "recursion_available": false, 899 | "z": 
0, 900 | "authentic_data": false, 901 | "checking_disabled": false, 902 | "rcode": "NoError" 903 | }, 904 | "qdcount": 1, 905 | "ancount": 7681, 906 | "aucount": 4098, 907 | "adcount": 2, 908 | "payload": [ 909 | { 910 | "section": "query", 911 | "type": "TXT", 912 | "class": "IN", 913 | "name": "\u0001ragons.aioy.eq." 914 | }, 915 | { 916 | "error": "parse error in answer section" 917 | } 918 | ] 919 | }, 920 | "sPKEAAABbmQgfwhhc2RyYWdvbnMEZWlveQJldQAAEAABB2RyYWcBbnMEYWlveQJldQAAEAABAAB1AAAQBwEAAA4QAE8lSGVyZSBiZW4AAAQAaXRoIFwgYW5kICIgYW5kIH8IYXMgd2VsbC4fVGhyEHdpbmcgaW4ggCBmb3IgZ29vZCBtZWFzdXJlLsApAAIAAQAADhAABgNuczHAKcApAAIAAQAADhAABgNuczLAKcCXAAEAAQAADhAABIIlDyPAqQAcAAEAAA4QABAgAQiIEEQAEAKgyf/+": { 921 | "id": 45298, 922 | "flags": { 923 | "query": false, 924 | "opcode": null, 925 | "authoritative_answer": true, 926 | "truncated": false, 927 | "recursion_desired": false, 928 | "recursion_available": false, 929 | "z": 0, 930 | "authentic_data": false, 931 | "checking_disabled": false, 932 | "rcode": "NoError" 933 | }, 934 | "qdcount": 1, 935 | "ancount": 28260, 936 | "aucount": 8319, 937 | "adcount": 2145, 938 | "payload": [ 939 | { 940 | "error": "parse error in question section" 941 | } 942 | ] 943 | }, 944 | 
"sPKsrKysrKysZKysrKysrLusrKysrKysrKysKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoLCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgorKysrKy6rKysrKysrAA9PT0APT09PT09PT0jPT09PT09PT09PT09PT09PT09PSAAqD09PT09PT0BPT09Iz1WPT0oKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExKCgoKCgoKCgoKCgoKCgoKCgoKCgSKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCisrKysrKysrKysrKysAD3/////PT09PT09PSM9PT09PT09PT09PT09PT09PT09IACoPT09PT09PQE9PT0jPT09PUc9PT09PcApwCkAAgABAAAOEAAGDW5zMsApwIAAIT09PT09PT09ID09//8AAEAAPT09PT1HPT1aPT09PT09PT09PYA9PT09PT09PT09PT09LVE9PT09PT09IAA9PT09PT09PT09f////z09PT09PT09PT09RD09PT09ID098f8AAD09PT09PT1HPT09PT09PT09PT09PYA9PT09PSM9PT09Pf+Pj489PT09//9//z09PT09PT09PT09PT1ZPVBFPT09PT09PWQ9PTM9PT09PT09PT0yPT09PTY9PT09PT09PT0AAAB/bnMxwCnAKQACAAEAAA4QAAYDbnMywCnAgAAhAAEAAA4QAASCJQ8jwKkAHAAAAgAAZCAQIAIIrKysrKysrGSsrKysrKy7rKysrKysrKysrCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoAgICAgICAgICAigoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKKysrKysrKysrKysrKwAPT09AD09PT09PT09Iz09PT09PT09PT09PT09PT09PT0gAKg9PT09PT09AT09PSM9Vj09KCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKDExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMSgoKCgoKCgoKCgoKCgoKCgoKCgoEigoKCgoKCgoKCgoKCgoKCgoKCgoKCgorKysrKysrKysrKysrAA9PT0APT09PT09PT0jPT09PT09PT09PT09PT09PT09PSAAqD09PT09PT0BPT09Iz09PT1HPT09PT3AKcApAAIAAQAADhAABg1uczLAKcCAACE9PT09PT09PSA9Pf//AABAAD09PT09Rz09Wj09PT09PT09PT2APT09PT09PT09PT09PS1RPT09PT09PSAAPT09PT09PT09PX////89PT09PT09PT09PUQABAAAPSA9PfH/AAA9PT09PT09Rz09PT09PT09PT09PT2APT09PT0jPT09PT3/j4+PPT09Pf//f/89PT09PT09PT09PT09WT1QRT09PT09PT1kPT0zPT09PT09PT09Mj09PT02PT09PT09PT09AAAAf25zMcApwCk
AAgABAAAOEAAGA25zMsApwIAAIQABAAAOEAAEgiUPghBEABACoMn//g==": { 945 | "id": 45298, 946 | "flags": { 947 | "query": false, 948 | "opcode": null, 949 | "authoritative_answer": true, 950 | "truncated": false, 951 | "recursion_desired": false, 952 | "recursion_available": true, 953 | "z": 0, 954 | "authentic_data": true, 955 | "checking_disabled": false, 956 | "rcode": "Unassigned/Reserved" 957 | }, 958 | "qdcount": 44204, 959 | "ancount": 44204, 960 | "aucount": 44132, 961 | "adcount": 44204, 962 | "payload": [ 963 | { 964 | "error": "parse error in question section" 965 | } 966 | ] 967 | } 968 | } 969 | --------------------------------------------------------------------------------