├── assignments ├── assignment0 │ ├── excercise │ │ ├── python-env.sh │ │ ├── requirements.txt │ │ ├── get_dataset.sh │ │ └── setup_solr.sh │ └── README.md ├── assignment1 │ ├── exercise │ │ └── src │ │ │ ├── frontend │ │ │ ├── templates │ │ │ │ ├── layout.html │ │ │ │ └── simpleIndexSearchPage.html │ │ │ └── app.py │ │ │ ├── understand_data.py │ │ │ └── simple_index.py │ └── README.md └── assignment2 │ ├── excercise │ └── src │ │ ├── frontend │ │ ├── templates │ │ │ ├── layout.html │ │ │ ├── simpleIndexSearchPage.html │ │ │ └── entityAwareSearchPage.html │ │ └── app.py │ │ └── entity_aware_index.py │ └── README.md ├── finished-product ├── resources │ ├── sigir2017-demo.gif │ ├── stanford_title_ner_tags_case_sensitive.csv.gz │ ├── setup_solr.sh │ └── create_fields.sh ├── data │ └── get_dataset.sh └── src │ ├── frontend │ ├── templates │ │ ├── layout.html │ │ ├── simpleIndexSearchPage.html │ │ └── entityAwareSearchPage.html │ └── app.py │ ├── backend │ ├── understand_data.py │ └── nlp.py │ └── indexing │ ├── simple_index.py │ └── entity_aware_index.py ├── README.md └── LICENSE /assignments/assignment0/excercise/python-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pip install -r requirements.txt 4 | python -m nltk.downloader punkt 5 | python -m nltk.downloader stopwords -------------------------------------------------------------------------------- /finished-product/resources/sigir2017-demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/candidate-selection-tutorial-sigir2017/candidate-selection-tutorial/HEAD/finished-product/resources/sigir2017-demo.gif -------------------------------------------------------------------------------- /finished-product/resources/stanford_title_ner_tags_case_sensitive.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/candidate-selection-tutorial-sigir2017/candidate-selection-tutorial/HEAD/finished-product/resources/stanford_title_ner_tags_case_sensitive.csv.gz -------------------------------------------------------------------------------- /finished-product/resources/setup_solr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf solr-6.6.0 4 | wget -nc http://www-eu.apache.org/dist/lucene/solr/6.6.0/solr-6.6.0.tgz 5 | tar -zxf solr-6.6.0.tgz 6 | cd solr-6.6.0 7 | bin/solr restart 8 | cd .. 
9 | ./create_fields.sh 10 | -------------------------------------------------------------------------------- /assignments/assignment0/excercise/requirements.txt: -------------------------------------------------------------------------------- 1 | cycler==0.10.0 2 | httplib2==0.10.3 3 | lxml==3.8.0 4 | nltk==3.2.4 5 | numpy==1.13.1 6 | pandas==0.20.3 7 | PTable==0.9.2 8 | pyparsing==2.2.0 9 | pysolr==3.6.0 10 | python-dateutil==2.6.1 11 | pytz==2017.2 12 | requests==2.10.0 13 | six==1.10.0 14 | sunburnt==0.6 15 | web.py==0.37 16 | sner==0.2.1 17 | -------------------------------------------------------------------------------- /assignments/assignment0/excercise/get_dataset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Escape code 6 | esc=`echo -en "\033"` 7 | 8 | # Set colors 9 | cc_red="${esc}[0;31m" 10 | cc_green="${esc}[0;32m" 11 | cc_yellow="${esc}[0;33m" 12 | cc_blue="${esc}[0;34m" 13 | cc_normal=`echo -en "${esc}[m\017"` 14 | 15 | function ec () { 16 | echo -e "${cc_green}${1}${cc_normal}" 17 | } 18 | 19 | cd ~/workspace/candidate-selection-tutorial/finished-product/data 20 | ./get_dataset.sh 21 | cd ~/workspace/candidate-selection-tutorial/assignments/assignment0/excercise 22 | ec "\n\nNews aggregator dataset has been downloaded!" 23 | 24 | -------------------------------------------------------------------------------- /assignments/assignment0/excercise/setup_solr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Escape code 5 | esc=`echo -en "\033"` 6 | 7 | # Set colors 8 | cc_red="${esc}[0;31m" 9 | cc_green="${esc}[0;32m" 10 | cc_yellow="${esc}[0;33m" 11 | cc_blue="${esc}[0;34m" 12 | cc_normal=`echo -en "${esc}[m\017"` 13 | 14 | function ec () { 15 | echo -e "${cc_green}${1}${cc_normal}" 16 | } 17 | 18 | cd ~/workspace/candidate-selection-tutorial/finished-product/resources 19 | ./setup_solr.sh 20 | cd ~/workspace/candidate-selection-tutorial/assignments/assignment0/excercise 21 | ec "\n\nSolr has been started and collections with specific dataset fields have been created!" 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## SIGIR 2017 - Candidate Selection for Personalized Search and Recommender Systems 2 | 3 | ### Abstract 4 | Modern day social media search and recommender systems require complex query formulation that incorporates both user context and their explicit search queries. Users expect these systems to be fast and provide relevant results to their query and context. With millions of documents to choose from, these systems utilize a multi-pass scoring function to narrow the results and provide the most relevant ones to users. Candidate selection is required to sift through all the documents in the index and select a relevant few to be ranked by subsequent scoring functions. It becomes crucial to narrow down the document set while maintaining relevant ones in resulting set. In this tutorial we survey various candidate selection techniques and deep dive into case studies on a large scale social media platform. In the later half we provide hands-on tutorial where we explore building these candidate selection models on a real world dataset and see how to balance the tradeoff between relevance and latency. 
5 | 6 | ### Presenters 7 | Dhruv Arya, Ganesh Venkataraman, Aman Grover, Krishnaram Kenthapadi, Yiqun Liu 8 | 9 | ### Final Output 10 | ![Demo](finished-product/resources/sigir2017-demo.gif) 11 | -------------------------------------------------------------------------------- /assignments/assignment1/exercise/src/frontend/templates/layout.html: -------------------------------------------------------------------------------- 1 | $def with (content) 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | News Articles - Search 14 | 15 | 16 | 17 | 25 |
26 | 27 |

28 | 29 | $:content 30 | 31 |
32 | 33 | 34 | -------------------------------------------------------------------------------- /finished-product/data/get_dataset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Escape code 6 | esc=`echo -en "\033"` 7 | 8 | # Set colors 9 | cc_red="${esc}[0;31m" 10 | cc_green="${esc}[0;32m" 11 | cc_yellow="${esc}[0;33m" 12 | cc_blue="${esc}[0;34m" 13 | cc_normal=`echo -en "${esc}[m\017"` 14 | 15 | function ec () { 16 | echo -e "${cc_green}${1}${cc_normal}" 17 | } 18 | 19 | ec "Downloading the NewsAggregator dataset" 20 | wget -nc http://archive.ics.uci.edu/ml/machine-learning-databases/00359/NewsAggregatorDataset.zip 21 | rm -rf news-aggregator-dataset 22 | mkdir news-aggregator-dataset 23 | cd news-aggregator-dataset 24 | unzip ./../NewsAggregatorDataset.zip 25 | cd .. 26 | rm NewsAggregatorDataset.zip 27 | ec "NewsAggregator dataset has been downloaded!" 28 | ec "\nFetching Stanford NER library and dependencies" 29 | wget -nc "https://nlp.stanford.edu/software/stanford-ner-2017-06-09.zip" 30 | unzip stanford-ner-2017-06-09.zip 31 | mv stanford-ner-2017-06-09 stanford-ner 32 | rm -rf stanford-ner-2017-06-09.zip 33 | ec "\nDownloading Stanford Core NLP english models" 34 | wget -nc http://nlp.stanford.edu/software/stanford-english-corenlp-2017-06-09-models.jar 35 | rm -rf stanford-english-corenlp-models 36 | mkdir stanford-english-corenlp-models 37 | cd stanford-english-corenlp-models 38 | jar -xvf ./../stanford-english-corenlp-2017-06-09-models.jar 39 | cd .. 40 | cp -r stanford-english-corenlp-models/edu/stanford/nlp/models/ner/* stanford-ner/classifiers/ 41 | rm -rf stanford-english-corenlp-models 42 | ec "\n\nData dependency setup completed!" 43 | 44 | -------------------------------------------------------------------------------- /finished-product/src/frontend/templates/layout.html: -------------------------------------------------------------------------------- 1 | $def with (content) 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | News Articles - Search 14 | 15 | 16 | 17 | 26 |
27 | 28 |

29 | 30 | $:content 31 | 32 |
33 | 34 | 35 | -------------------------------------------------------------------------------- /finished-product/resources/create_fields.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./solr-6.6.0/bin/solr delete -c simpleindex 4 | ./solr-6.6.0/bin/solr delete -c entityawareindex 5 | ./solr-6.6.0/bin/solr restart 6 | ./solr-6.6.0/bin/solr create -c simpleindex 7 | ./solr-6.6.0/bin/solr create -c entityawareindex 8 | 9 | curl -X POST -H 'Content-type:application/json' --data-binary '{ 10 | "add-field-type" : { 11 | "name":"simple_indexed_text", 12 | "class":"solr.TextField", 13 | "positionIncrementGap":"100", 14 | "analyzer" : { 15 | "tokenizer":{ 16 | "class":"solr.WhitespaceTokenizerFactory" } 17 | }} 18 | }' http://localhost:8983/solr/simpleindex/schema 19 | 20 | curl -X POST -H 'Content-type:application/json' --data-binary '{ 21 | "add-dynamic-field":{ 22 | "name":"_news_*", 23 | "type":"simple_indexed_text", 24 | "indexed":true, 25 | "stored":true } 26 | }' http://localhost:8983/solr/simpleindex/schema 27 | 28 | curl -X POST -H 'Content-type:application/json' --data-binary '{ 29 | "add-field-type" : { 30 | "name":"simple_indexed_text", 31 | "class":"solr.TextField", 32 | "positionIncrementGap":"100", 33 | "analyzer" : { 34 | "tokenizer":{ 35 | "class":"solr.WhitespaceTokenizerFactory" } 36 | }} 37 | }' http://localhost:8983/solr/entityawareindex/schema 38 | 39 | curl -X POST -H 'Content-type:application/json' --data-binary '{ 40 | "add-dynamic-field":{ 41 | "name":"_news_*", 42 | "type":"simple_indexed_text", 43 | "indexed":true, 44 | "stored":true } 45 | }' http://localhost:8983/solr/entityawareindex/schema 46 | -------------------------------------------------------------------------------- /assignments/assignment2/excercise/src/frontend/templates/layout.html: -------------------------------------------------------------------------------- 1 | $def with (content) 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | News Articles - Search 14 | 15 | 16 | 17 | 27 |
28 | 29 |

30 | 31 | $:content 32 | 33 |
34 | 35 | 36 | -------------------------------------------------------------------------------- /finished-product/src/backend/understand_data.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import argparse 4 | import csv 5 | 6 | from prettytable import PrettyTable 7 | import pandas as pd 8 | 9 | 10 | HEADERS = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"] 11 | 12 | def run(input_file, num_records): 13 | 14 | # printing data in a structured form 15 | with open(input_file) as csvfile: 16 | csvreader = csv.reader(csvfile, delimiter=b'\t') 17 | pretty_table = PrettyTable() 18 | pretty_table.field_names = HEADERS 19 | for count, row in enumerate(csvreader): 20 | if count == num_records: 21 | break 22 | pretty_table.add_row(row) 23 | print pretty_table 24 | 25 | raw_input('Enter to continue: ') 26 | # analyze the dataset 27 | dataset = pd.read_csv(input_file, nrows=500000, sep=b'\t', names=HEADERS) 28 | print 'Shape of the dataset - %s' % (dataset.shape,) 29 | 30 | raw_input('Enter to continue: ') 31 | print 'Category values in the dataset -' 32 | print(dataset.CATEGORY.value_counts()) 33 | 34 | raw_input('Enter to continue: ') 35 | print 'Most common hostnames in the dataset -' 36 | print(dataset.HOSTNAME.value_counts()[dataset.HOSTNAME.value_counts() > 50]) 37 | 38 | 39 | 40 | 41 | def main(): 42 | arg_parser = argparse.ArgumentParser() 43 | arg_parser.add_argument('--input', action='store', dest='input', ) 44 | arg_parser.add_argument('--num_records', action='store', dest='num_records', default=10) 45 | args = arg_parser.parse_args() 46 | run(input_file=args.input, num_records=args.num_records) 47 | 48 | 49 | if __name__ == '__main__': 50 | main() -------------------------------------------------------------------------------- /assignments/assignment1/exercise/src/understand_data.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import argparse 4 | import csv 5 | 6 | from prettytable import PrettyTable 7 | import pandas as pd 8 | 9 | 10 | HEADERS = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"] 11 | 12 | def run(input_file, num_records): 13 | 14 | # printing data in a structured form 15 | with open(input_file) as csvfile: 16 | csvreader = csv.reader(csvfile, delimiter=b'\t') 17 | pretty_table = PrettyTable() 18 | pretty_table.field_names = HEADERS 19 | for count, row in enumerate(csvreader): 20 | if count == num_records: 21 | break 22 | pretty_table.add_row(row) 23 | print pretty_table 24 | 25 | raw_input('Enter to continue: ') 26 | # analyze the dataset 27 | dataset = pd.read_csv(input_file, nrows=500000, sep=b'\t', names=HEADERS) 28 | print 'Shape of the dataset - %s' % (dataset.shape,) 29 | 30 | raw_input('Enter to continue: ') 31 | print 'Category values in the dataset -' 32 | print(dataset.CATEGORY.value_counts()) 33 | 34 | raw_input('Enter to continue: ') 35 | print 'Most common hostnames in the dataset -' 36 | print(dataset.HOSTNAME.value_counts()[dataset.HOSTNAME.value_counts() > 50]) 37 | 38 | 39 | 40 | 41 | def main(): 42 | arg_parser = argparse.ArgumentParser() 43 | arg_parser.add_argument('--input', action='store', dest='input', ) 44 | arg_parser.add_argument('--num_records', action='store', dest='num_records', default=10) 45 | args = arg_parser.parse_args() 46 | run(input_file=args.input, num_records=args.num_records) 47 | 48 | 49 | if 
__name__ == '__main__': 50 | main() -------------------------------------------------------------------------------- /assignments/assignment1/exercise/src/simple_index.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from subprocess import call 4 | import argparse 5 | import csv 6 | import pysolr 7 | 8 | INDEX_NAME = 'simpleindex' 9 | INDEX_MAP = ["id", "title", "url", "publisher", "category", "story", "hostname", "timestamp"] 10 | SOLR_URL = 'http://localhost:8983/solr' 11 | 12 | 13 | def create_document(record): 14 | """ 15 | This function creates a representation for the document to be 16 | put in the solr index. 17 | """ 18 | # TODO: Write an iterator over the INDEX_MAP to fetch fields from the record and 19 | # return a dictionary representing the document. 20 | return None 21 | 22 | 23 | def index(input_file, num_records): 24 | """ 25 | Creates a representation of the document and puts the document 26 | in the solr index. The index name is defined as a part of the url. 27 | """ 28 | solr_interface = pysolr.Solr(url="%s/%s" % (SOLR_URL, INDEX_NAME)) 29 | with open(input_file) as csvfile: 30 | records = csv.reader(csvfile, delimiter=b'\t') 31 | batched_documents = [] 32 | for idx, record in enumerate(records): 33 | if idx == num_records: 34 | break 35 | # TODO: Write code for creating the document and passing it for indexing 36 | # Commit the changes to the index after adding the documents 37 | solr_interface.commit() 38 | print 'Finished adding the documents to the solr index' 39 | return 40 | 41 | 42 | def main(): 43 | arg_parser = argparse.ArgumentParser() 44 | arg_parser.add_argument('--input', action='store', dest='input', ) 45 | arg_parser.add_argument('--num_records', action='store', dest='num_records', default=250000) 46 | args = arg_parser.parse_args() 47 | index(input_file=args.input, num_records=args.num_records) 48 | 49 | if __name__ == '__main__': 50 | main() 51 | -------------------------------------------------------------------------------- /assignments/assignment1/exercise/src/frontend/app.py: -------------------------------------------------------------------------------- 1 | import web 2 | import pysolr 3 | import json 4 | from nltk.tokenize import word_tokenize 5 | 6 | urls = ( 7 | '/', 'SimpleIndexSearchPage', 8 | '/searchSimpleIndex', 'SearchSimpleIndex', 9 | ) 10 | 11 | CATEGORY = {'b': 'Business', 'e': 'Entertainment', 't': 'Science and Technology', 'm': 'Health'} 12 | render = web.template.render('templates/', base='layout') 13 | SOLR_SIMPLEINDEX = pysolr.Solr('http://localhost:8983/solr/simpleindex') 14 | 15 | 16 | def get_web_input(web_input): 17 | draw = web_input['draw'] 18 | query = web_input['search[value]'] 19 | offset = web_input['start'] 20 | count = web_input['length'] 21 | return draw, query, offset, count 22 | 23 | 24 | def search(query, offset, count, draw, solr_endpoint): 25 | """ 26 | This function is responsible for hitting the solr endpoint 27 | and returning the results back. 
28 | """ 29 | results = solr_endpoint.search(q=query, **{ 30 | 'start': int(offset), 31 | 'rows': int(count) 32 | }) 33 | print("Saw {0} result(s) for query {1}.".format(len(results), query)) 34 | formatted_hits = [] 35 | for hit in results.docs: 36 | formatted_hits.append( 37 | [hit['_news_title'], hit['_news_publisher'], CATEGORY[hit['_news_category'][0]], hit['_news_url']]) 38 | response = {'draw': draw, 39 | 'recordsFiltered': results.hits, 40 | 'data': formatted_hits} 41 | web.header('Content-Type', 'application/json') 42 | return json.dumps(response) 43 | 44 | 45 | class SimpleIndexSearchPage: 46 | def GET(self): 47 | return render.simpleIndexSearchPage() 48 | 49 | 50 | class SearchSimpleIndex: 51 | def GET(self): 52 | draw, query, offset, count = get_web_input(web_input=web.input()) 53 | # TODO: Write code for handling the empty query (no keywords) 54 | # TODO: Write code for tokenizing the search query and creating must clauses for each token 55 | return None 56 | 57 | 58 | 59 | if __name__ == "__main__": 60 | app = web.application(urls, globals()) 61 | app.run() 62 | -------------------------------------------------------------------------------- /finished-product/src/indexing/simple_index.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from subprocess import call 4 | import argparse 5 | import csv 6 | import pysolr 7 | 8 | INDEX_NAME = 'simpleindex' 9 | INDEX_MAP = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"] 10 | SOLR_URL = 'http://localhost:8983/solr' 11 | 12 | 13 | def create_document(record): 14 | """ 15 | This function creates a representation for the document to be 16 | put in the solr index. 17 | """ 18 | document = {} 19 | for idx, field in enumerate(INDEX_MAP): 20 | if field.lower() == 'id': 21 | document[field.lower()] = record[idx] 22 | else: 23 | document["_news_%s" % (field.lower())] = record[idx].lower() 24 | return document 25 | 26 | 27 | def index(input_file, num_records): 28 | """ 29 | Creates a representation of the document and puts the document 30 | in the solr index. The index name is defined as a part of the url. 
31 | """ 32 | 33 | # create the solr core 34 | call(["./../../resources/solr-6.6.0/bin/solr", "create", "-c", INDEX_NAME]) 35 | 36 | solr_interface = pysolr.Solr(url="%s/%s" % (SOLR_URL, INDEX_NAME)) 37 | with open(input_file) as csvfile: 38 | records = csv.reader(csvfile, delimiter=b'\t') 39 | batched_documents = [] 40 | for idx, record in enumerate(records): 41 | if idx == num_records: 42 | break 43 | if idx % 5000 == 0: 44 | solr_interface.add(batched_documents) 45 | batched_documents = [] 46 | print 'Added %d documents to the %s index' % (idx, INDEX_NAME) 47 | batched_documents.append(create_document(record)) 48 | # Commit the changes to the index after adding the documents 49 | solr_interface.commit() 50 | print 'Finished adding the documents to the solr index' 51 | return 52 | 53 | 54 | def main(): 55 | arg_parser = argparse.ArgumentParser() 56 | arg_parser.add_argument('--input', action='store', dest='input', ) 57 | arg_parser.add_argument('--num_records', action='store', dest='num_records', default=250000) 58 | args = arg_parser.parse_args() 59 | index(input_file=args.input, num_records=args.num_records) 60 | 61 | if __name__ == '__main__': 62 | main() -------------------------------------------------------------------------------- /assignments/assignment2/excercise/src/entity_aware_index.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from itertools import izip 3 | import argparse 4 | import json 5 | import csv 6 | import pysolr 7 | import gzip 8 | 9 | INDEX_NAME = 'entityawareindex' 10 | INDEX_MAP = ["id", "title", "url", "publisher", "category", "story", "hostname", "timestamp"] 11 | SOLR_URL = 'http://localhost:8983/solr' 12 | 13 | # Location, Time, Person, Organization, Money, Percent, Date (Stanford NER) 14 | 15 | def create_document(record, ner_tags): 16 | """ 17 | This function creates a representation for the document to be 18 | put in the solr index. 19 | """ 20 | # TODO: Write an iterator over the INDEX_MAP to fetch fields from the record and 21 | # return a dictionary representing the document. 22 | return None 23 | 24 | 25 | def index(input_file, ner_tags_filename, num_records): 26 | """ 27 | Creates a representation of the document and puts the document 28 | in the solr index. The index name is defined as a part of the url. 
29 | """ 30 | 31 | solr_interface = pysolr.Solr(url="%s/%s" % (SOLR_URL, INDEX_NAME)) 32 | with open(input_file) as csvfile, gzip.open(ner_tags_filename) as ner_tags_file: 33 | records = csv.reader(csvfile, delimiter=b'\t') 34 | ner_tags = csv.reader(ner_tags_file, delimiter=b'\t') 35 | batched_documents = [] 36 | for idx, (record, ner_tag_serialized) in enumerate(izip(records, ner_tags)): 37 | if idx == num_records: 38 | break 39 | # ner_tag is a dictionary of the form {"ORGANIZATION": ["Omega", "PayPal", "CNBC"], "PERSON": ["Cooperman"]} 40 | ner_tag = json.loads(ner_tag_serialized[1]) 41 | # TODO: Write code for creating the document and passing it for indexing 42 | # Commit the changes to the index after adding the documents 43 | solr_interface.commit() 44 | print 'Finished adding the documents to the solr index' 45 | return 46 | 47 | 48 | def main(): 49 | arg_parser = argparse.ArgumentParser() 50 | arg_parser.add_argument('--input', action='store', dest='input', ) 51 | arg_parser.add_argument('--ner_tags', action='store', dest='ner_tags', ) 52 | arg_parser.add_argument('--num_records', action='store', dest='num_records', default=250000) 53 | args = arg_parser.parse_args() 54 | index(input_file=args.input, ner_tags_filename=args.ner_tags, num_records=args.num_records) 55 | 56 | if __name__ == '__main__': 57 | main() 58 | -------------------------------------------------------------------------------- /finished-product/src/frontend/templates/simpleIndexSearchPage.html: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 |
5 |

Indexing and Basic Search

6 |
7 | 13 |
14 |
15 |
16 | 17 |
18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 |
Search ResultsPublisherCategoryUrl
31 |
32 | 33 | -------------------------------------------------------------------------------- /finished-product/src/frontend/templates/entityAwareSearchPage.html: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 |
5 |

Entity Aware Index and Search

6 |
7 | 13 |
14 |
15 |
16 | 17 |
18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 |
Search ResultsPublisherCategoryUrl
31 |
32 | 33 | -------------------------------------------------------------------------------- /assignments/assignment1/exercise/src/frontend/templates/simpleIndexSearchPage.html: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 |
5 |

Indexing and Basic Search

6 |
7 | 13 |
14 |
15 |
16 | 17 |
18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 |
Search ResultsPublisherCategoryUrl
31 |
32 | 33 | -------------------------------------------------------------------------------- /assignments/assignment2/excercise/src/frontend/templates/simpleIndexSearchPage.html: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 |
5 |

Indexing and Basic Search

6 |
7 | 13 |
14 |
15 |
16 | 17 |
18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 |
Search ResultsPublisherCategoryUrl
31 |
32 | 33 | -------------------------------------------------------------------------------- /assignments/assignment2/excercise/src/frontend/templates/entityAwareSearchPage.html: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 |
5 |

Entity Aware Index and Search

6 |
7 | 13 |
14 |
15 |
16 | 17 |
18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 |
Search ResultsPublisherCategoryUrl
31 |
32 | 33 | -------------------------------------------------------------------------------- /finished-product/src/backend/nlp.py: -------------------------------------------------------------------------------- 1 | # from __future__ import unicode_literals 2 | import argparse 3 | from tqdm import tqdm 4 | import ner 5 | from collections import defaultdict 6 | from itertools import groupby 7 | from operator import itemgetter 8 | from nltk.tokenize import StanfordTokenizer 9 | from nltk.tag.stanford import StanfordNERTagger 10 | from sner import Ner 11 | import json 12 | import csv 13 | import sys 14 | reload(sys) 15 | sys.setdefaultencoding('utf8') 16 | 17 | 18 | INDEX_MAP = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"] 19 | 20 | # Location, Time, Person, Organization, Money, Percent, Date 21 | CASELESS_CLASSIFIER = '/usr/share/stanford-ner/classifiers/english.muc.7class.caseless.distsim.crf.ser.gz' 22 | 23 | # To use the Stanford NER server run the following command in the stanford-ner directory 24 | ''' 25 | java -Xmx3g -Djava.ext.dirs=./lib -cp stanford-ner.jar edu.stanford.nlp.ie.NERServer -port 9199 -loadClassifier 26 | ./classifiers/english.muc.7class.distsim.crf.ser.gz -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer 27 | -tokenizerOptions tokenizeNLs=false 28 | ''' 29 | STANFORD_NER_HANDLER = Ner(host='localhost',port=9199) 30 | 31 | 32 | def accumulate(list_of_tuples): 33 | tokens, entities = zip(*list_of_tuples) 34 | recognised = defaultdict(set) 35 | duplicates = defaultdict(list) 36 | 37 | for i, item in enumerate(entities): 38 | duplicates[item].append(i) 39 | 40 | for key, value in duplicates.items(): 41 | for k, g in groupby(enumerate(value), lambda x: x[0] - x[1]): 42 | indices = list(map(itemgetter(1), g)) 43 | recognised[key].add(' '.join(tokens[index] for index in indices)) 44 | recognised.pop('O', None) 45 | 46 | recognised = dict(recognised) 47 | ner_info = {} 48 | for key, value in recognised.iteritems(): 49 | ner_info[key] = list(value) 50 | return ner_info 51 | 52 | 53 | def _generate_ner_tags(sentence): 54 | token_entity_pairs = STANFORD_NER_HANDLER.get_entities(sentence) 55 | accumulated = accumulate(token_entity_pairs) 56 | return accumulated 57 | 58 | 59 | def generate_ner_tags(input_file, output_file, num_records): 60 | with open(input_file) as csvfile: 61 | with open(output_file, 'w+') as csvoutputfile: 62 | records = csv.reader(csvfile, delimiter=b'\t') 63 | for idx, record in tqdm(enumerate(records)): 64 | if idx == num_records: 65 | break 66 | title = record[1] 67 | ner_tags = _generate_ner_tags(title) 68 | csvoutputfile.write("%s\t%s" % (record[0], json.dumps(ner_tags))) 69 | csvoutputfile.write("\n") 70 | csvoutputfile.flush() 71 | 72 | 73 | 74 | 75 | def main(): 76 | arg_parser = argparse.ArgumentParser() 77 | arg_parser.add_argument('--input', action='store', dest='input', ) 78 | arg_parser.add_argument('--output', action='store', dest='output', ) 79 | arg_parser.add_argument('--num_records', action='store', dest='num_records', default=500000) 80 | args = arg_parser.parse_args() 81 | generate_ner_tags(input_file=args.input, output_file=args.output, num_records=args.num_records) 82 | 83 | if __name__ == '__main__': 84 | main() 85 | -------------------------------------------------------------------------------- /finished-product/src/indexing/entity_aware_index.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from 
subprocess import call 4 | from itertools import izip 5 | import argparse 6 | import json 7 | import csv 8 | import pysolr 9 | import gzip 10 | 11 | INDEX_NAME = 'entityawareindex' 12 | INDEX_MAP = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"] 13 | SOLR_URL = 'http://localhost:8983/solr' 14 | 15 | # Location, Time, Person, Organization, Money, Percent, Date (Stanford NER) 16 | 17 | # Person, Norp (Nationalities or religious or political groups.), Facility, Org, GPE (Countries, cities, states.) 18 | # Loc (Non GPE Locations ex. mountain ranges, water), Product (Objects, vehicles, foods, etc. (Not services.), 19 | # EVENT (Named hurricanes, battles, wars, sports events, etc.), WORK_OF_ART (Titles of books, songs, etc), LANGUAGE 20 | # Refer to https://spacy.io/docs/usage/entity-recognition (SPACY NER) 21 | 22 | 23 | def create_document(record, ner_tags): 24 | """ 25 | This function creates a representation for the document to be 26 | put in the solr index. 27 | """ 28 | document = {} 29 | for idx, field in enumerate(INDEX_MAP): 30 | if field.lower() == 'id': 31 | document[field.lower()] = record[idx] 32 | elif field.lower() == 'title': 33 | for ner_tag, tokens in ner_tags.iteritems(): 34 | # generate field like title_ ex. title_person, title_organization etc. 35 | document["_news_%s_%s" % (field.lower(), ner_tag.lower())] \ 36 | = " ".join(map(lambda x:x.lower(), tokens)) 37 | document["_news_%s" % (field.lower())] = record[idx].lower() 38 | else: 39 | document["_news_%s" % (field.lower())] = record[idx].lower() 40 | return document 41 | 42 | 43 | def index(input_file, ner_tags_filename, num_records): 44 | """ 45 | Creates a representation of the document and puts the document 46 | in the solr index. The index name is defined as a part of the url. 
47 | """ 48 | 49 | # create the solr core 50 | call(["./../../resources/solr-6.6.0/bin/solr", "create", "-c", INDEX_NAME]) 51 | 52 | solr_interface = pysolr.Solr(url="%s/%s" % (SOLR_URL, INDEX_NAME)) 53 | with open(input_file) as csvfile, gzip.open(ner_tags_filename) as ner_tags_file: 54 | records = csv.reader(csvfile, delimiter=b'\t') 55 | ner_tags = csv.reader(ner_tags_file, delimiter=b'\t') 56 | batched_documents = [] 57 | for idx, (record, ner_tag_serialized) in enumerate(izip(records, ner_tags)): 58 | if idx == num_records: 59 | break 60 | 61 | if len(record) != 8: 62 | continue 63 | 64 | if idx % 5000 == 0: 65 | solr_interface.add(batched_documents) 66 | batched_documents = [] 67 | print 'Added %d documents to the %s index' % (idx, INDEX_NAME) 68 | ner_tag = json.loads(ner_tag_serialized[1]) 69 | batched_documents.append(create_document(record, ner_tag)) 70 | # Commit the changes to the index after adding the documents 71 | solr_interface.commit() 72 | print 'Finished adding the documents to the solr index' 73 | return 74 | 75 | 76 | def main(): 77 | arg_parser = argparse.ArgumentParser() 78 | arg_parser.add_argument('--input', action='store', dest='input', ) 79 | arg_parser.add_argument('--ner_tags', action='store', dest='ner_tags', ) 80 | arg_parser.add_argument('--num_records', action='store', dest='num_records', default=250000) 81 | args = arg_parser.parse_args() 82 | index(input_file=args.input, ner_tags_filename=args.ner_tags, num_records=args.num_records) 83 | 84 | if __name__ == '__main__': 85 | main() -------------------------------------------------------------------------------- /assignments/assignment2/README.md: -------------------------------------------------------------------------------- 1 | # Assignment 2! 2 | 3 | In this assignment we will be building a better index with fields specific to the entities recognized in the title. We will make use of Stanford NER along NLTK. In addition to building the index we will work on utilizing entities in the incoming query and writing a field specific query matching entities in the query with the fields containing those entities in the search index. 4 | 5 | ## Building the Search Index 6 | We will use our learnings from the previous assignment to build the entity based search index. To utilize the time better we have pregenerated the entity tags using the **Stanford NER** library and **english.all.3class.distsim.crf.ser.gz** classifier. The classifier provides three tags namely 7 | 8 | * *PERSON* 9 | * *ORGANIZATION* 10 | * *LOCATION* 11 | 12 | When building the index we will read through the tags and our dataset simultaneously. This will allow us to use the pregenerated tags when building the document to be indexed. 13 | 14 | Open the file **entity\_aware\_index.py** and you will need to implement the following parts 15 | 16 | * Writing the function for creating the document. Similar to **Assignment 1** we will be building a dictionary with all the index fields. Our focus here will be to add additional title fields specifically for the NER tags. The specific fields \_news\_title\_person, \_news\_title\_organization and \_news\_title\_location need to be added in addition to \_news\_title. 
17 | 18 | ~~~ 19 | Your documents should have a structure similar to the one below - 20 | 21 | { 22 | "_news_url": "http://www.ifamagazine.com/news/us-open-stocks-fall-after-fed-official-hints-at-accelerated-tapering-294436", 23 | "_news_title_organization": "Fed", 24 | "_news_title": "us open: stocks fall after fed official hints at accelerated tapering", 25 | "_news_story": "dduyu0vzz0brnemioxupqvp6sixvm", 26 | "_news_category": "b", 27 | "_news_hostname": "www.ifamagazine.com", 28 | "_news_publisher": "ifa magazine", 29 | "_news_timestamp": "1394470371550", 30 | "id": "3", 31 | "_news_title_location": "US" 32 | } 33 | ~~~ 34 | 35 | * The second task involves writing code for adding a document to the Solr index. You can reuse your code from **Assignment 1** here. 36 | 37 | To begin indexing, follow the commands listed below; you need to be in the **assignment2** folder to run them. 38 | 39 | ~~~ 40 | cd exercise/src 41 | python entity_aware_index.py --input /home/sigir/workspace/candidate-selection-tutorial/finished-product/data/news-aggregator-dataset/newsCorpora.csv --ner_tags /home/sigir/workspace/candidate-selection-tutorial/finished-product/resources/stanford_title_ner_tags_case_sensitive.csv.gz 42 | ~~~ 43 | 44 | ## Query Rewriting and Searching 45 | In this task we will define a query that is matched against documents on specific fields. We will make use of our entity understanding and utilize the **Stanford NER** server at runtime to generate tags. 46 | 47 | As the next step, open the file **frontend/app.py**. This is our middle tier that serves the search requests and talks to the search backend. In this file you need to write the **GET** method of the **SearchEntityAwareIndex** class (a rough sketch of one possible implementation is included at the end of this assignment for reference). 48 | 49 | * **Match All Query** - Similar to **Assignment 1**, add the logic to serve results when the query is empty. 50 | * **Entity & Field Based Query** - For queries with keywords we will make use of the entity-specific fields in Solr (all content is also indexed into a predefined "catch-all" \_text\_ field, which enables single-field search that includes all fields' content). The query should take the following form: 51 | 52 | ~~~ 53 | Query: cooperman paypal 54 | Tokens: ['cooperman', 'paypal'] 55 | NER Tags: {"ORGANIZATION": ["PayPal"], "PERSON": ["Cooperman"]} 56 | Generated Query: _news_title_organization:paypal AND _news_title_person:Cooperman 57 | ~~~ 58 | 59 | #### Helper Code Snippets 60 | * Calling the Stanford NER server to get NER tags; the accumulate_tags function in SearchEntityAwareIndex is provided for aggregating the NER tags. 61 | 62 | ~~~ 63 | entity_tags = STANFORD_NER_SERVER.get_entities(query) 64 | entity_tags = self.accumulate_tags(entity_tags) 65 | ~~~ 66 | 67 | * Boosting Parameter - You can pass in an optional boosting parameter of the form below to boost matches in certain fields. Example: 68 | 69 | ~~~ 70 | qf = '_news_title_person^10 _news_title_organization^5 _news_title_location^100 _news_title^2.0 _news_publisher^10.0' 71 | 72 | ~~~ 73 | 74 | ### Running the server 75 | To see the search in action, follow the commands below: 76 | 77 | ~~~ 78 | cd frontend 79 | python app.py 80 | ~~~ 81 | 82 | This should run the search server on [http://0.0.0.0:8080](http://0.0.0.0:8080). The page should look like the image below. Try out some queries to see if you are getting results back. 83 | 84 | Congratulations, you have completed Assignment 2! 
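#### Reference Sketch (attempt the exercise first)
The snippet below is a rough sketch of how the **GET** method of SearchEntityAwareIndex could be assembled. It is only one possible implementation: it reuses the get_web_input, search_entity_aware_index, accumulate_tags, word_tokenize and STANFORD_NER_SERVER helpers already present in the exercise file, and the boost values are simply the example qf string shown above.

~~~
def GET(self):
    draw, query, offset, count = get_web_input(web_input=web.input())
    qf = '_news_title_person^10 _news_title_organization^5 _news_title_location^100 _news_title^2.0 _news_publisher^10.0'

    # Match all query: get_web_input already rewrites an empty query to *:*
    if query == '*:*':
        return search_entity_aware_index(query=query, offset=offset, count=count, draw=draw, qf=qf)

    # Tag the raw query with the Stanford NER server and aggregate the tags per entity type
    entity_tags = STANFORD_NER_SERVER.get_entities(query)
    entity_tags = self.accumulate_tags(entity_tags)

    # Build one clause per recognized entity, e.g. _news_title_organization:paypal
    clauses = []
    for ner_tag, segments in entity_tags.iteritems():
        if ner_tag == 'O':
            continue  # skip tokens that were not tagged as entities
        for segment in segments:
            clauses.append('_news_title_%s:"%s"' % (ner_tag.lower(), segment.lower()))

    # Fall back to plain token matching when no entities were recognized
    if not clauses:
        clauses = ['_text_:%s' % token for token in word_tokenize(query)]

    query = ' AND '.join(clauses)
    return search_entity_aware_index(query=query, offset=offset, count=count, draw=draw, qf=qf)
~~~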
85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /assignments/assignment2/excercise/src/frontend/app.py: -------------------------------------------------------------------------------- 1 | from itertools import groupby 2 | from operator import itemgetter 3 | 4 | import web 5 | import pysolr 6 | import string 7 | import json 8 | 9 | from collections import defaultdict 10 | from nltk.corpus import stopwords 11 | from nltk.tokenize import word_tokenize 12 | from sner import Ner 13 | 14 | urls = ( 15 | '/', 'SimpleIndexSearchPage', 16 | '/entityAwareSearchPage', 'EntityAwareSearch', 17 | '/searchSimpleIndex', 'SearchSimpleIndex', 18 | '/searchEntityAwareIndex', 'SearchEntityAwareIndex', 19 | '/searchEntityAwareWithEfficientQuery', 'SearchEntityAwareWithEfficientQuery' 20 | ) 21 | 22 | CATEGORY = {'b': 'Business', 'e': 'Entertainment', 't': 'Science and Technology', 'm': 'Health'} 23 | render = web.template.render('templates/', base='layout') 24 | SOLR_SIMPLEINDEX = pysolr.Solr('http://localhost:8983/solr/simpleindex') 25 | SOLR_ENTITYAWAREINDEX = pysolr.Solr('http://localhost:8983/solr/entityawareindex') 26 | STANFORD_NER_SERVER = Ner(host='localhost', port=9199) 27 | 28 | 29 | def get_web_input(web_input): 30 | draw = web_input['draw'] 31 | query = web_input['search[value]'] 32 | if len(query) == 0: 33 | query = '*:*' 34 | offset = web_input['start'] 35 | count = web_input['length'] 36 | return draw, query, offset, count 37 | 38 | 39 | def search_simple_index(query, offset, count, draw): 40 | """ 41 | This function is responsible for hitting the solr endpoint 42 | and returning the results back. 43 | """ 44 | results = SOLR_SIMPLEINDEX.search(q=query, **{ 45 | 'start': int(offset), 46 | 'rows': int(count) 47 | }) 48 | print("Saw {0} result(s) for query {1}.".format(len(results), query)) 49 | formatted_hits = [] 50 | for hit in results.docs: 51 | formatted_hits.append( 52 | [hit['_news_title'], hit['_news_publisher'], CATEGORY[hit['_news_category'][0]], hit['_news_url']]) 53 | response = {'draw': draw, 54 | 'recordsFiltered': results.hits, 55 | 'data': formatted_hits} 56 | web.header('Content-Type', 'application/json') 57 | return json.dumps(response) 58 | 59 | 60 | def search_entity_aware_index(query, offset, count, draw, qf): 61 | """ 62 | This function is responsible for hitting the solr endpoint 63 | and returning the results back. 
64 | """ 65 | results = SOLR_ENTITYAWAREINDEX.search(q=query, **{ 66 | 'start': int(offset), 67 | 'rows': int(count), 68 | 'qf': qf 69 | }) 70 | print("Saw {0} result(s) for query {1}.".format(len(results), query)) 71 | formatted_hits = [] 72 | for hit in results.docs: 73 | formatted_hits.append( 74 | [hit['_news_title'], hit['_news_publisher'], CATEGORY[hit['_news_category'][0]], hit['_news_url']]) 75 | response = {'draw': draw, 76 | 'recordsFiltered': results.hits, 77 | 'data': formatted_hits} 78 | web.header('Content-Type', 'application/json') 79 | return json.dumps(response) 80 | 81 | 82 | class SimpleIndexSearchPage: 83 | def GET(self): 84 | return render.simpleIndexSearchPage() 85 | 86 | 87 | class EntityAwareSearch: 88 | def GET(self): 89 | return render.entityAwareSearchPage() 90 | 91 | 92 | class SearchSimpleIndex: 93 | def GET(self): 94 | draw, query, offset, count = get_web_input(web_input=web.input()) 95 | 96 | if query == '*:*': 97 | return search_simple_index(query=query, offset=offset, count=count, draw=draw) 98 | 99 | clauses = [] 100 | for token in word_tokenize(query): 101 | clauses.append("+_text_:%s" % token) 102 | query = " AND ".join(clauses) 103 | return search_simple_index(query=query, offset=offset, count=count, draw=draw) 104 | 105 | 106 | class SearchEntityAwareIndex: 107 | def accumulate_tags(self, list_of_tuples): 108 | tokens, entities = zip(*list_of_tuples) 109 | recognised = defaultdict(set) 110 | duplicates = defaultdict(list) 111 | 112 | for i, item in enumerate(entities): 113 | duplicates[item].append(i) 114 | 115 | for key, value in duplicates.items(): 116 | for k, g in groupby(enumerate(value), lambda x: x[0] - x[1]): 117 | indices = list(map(itemgetter(1), g)) 118 | recognised[key].add(' '.join(tokens[index] for index in indices)) 119 | # recognised.pop('O', None) 120 | 121 | recognised = dict(recognised) 122 | ner_info = {} 123 | for key, value in recognised.iteritems(): 124 | ner_info[key] = list(value) 125 | return ner_info 126 | 127 | 128 | def GET(self): 129 | draw, query, offset, count = get_web_input(web_input=web.input()) 130 | 131 | # TODO: Write code for handling the empty query (no keywords) 132 | # TODO: Write code for tokenizing the search query 133 | # TODO: Use the Stanford NER server to get NER tags for the query 134 | # TODO: Write out the candidate query with NER index fields 135 | # TODO: Define the boosting parameters for different NER index field matches 136 | 137 | return None 138 | 139 | 140 | if __name__ == "__main__": 141 | app = web.application(urls, globals()) 142 | app.run() -------------------------------------------------------------------------------- /assignments/assignment1/README.md: -------------------------------------------------------------------------------- 1 | # Assignment 1! 2 | 3 | In this assignment we will take a look at our dataset, the fields available and setup a basic search index using solr. We will read through the dataset and build our search index. Once we have the index setup we will write a very simple query to retrieve search results and show them on the ui. 4 | 5 | ## Exploring the News Aggregator Dataset 6 | The first step of building a search index is to understand the dataset and the fields that you want to allow the user to search on. To do this we will read a first few records of the dataset into a tabular form. This will allow us to understand how does the data look like. 
7 | 8 | This analysis allows us to understand to what degree are the following tasks needed 9 | 10 | * Tokenization and Segmentation 11 | * Term Normalization 12 | * Data transformation 13 | 14 | We have provided you with a basic script that prints out the data in a tabular form along with some statistics about field values. 15 | 16 | To run the script issue the following command or run it with arguments from PyCharm. 17 | 18 | ~~~ 19 | cd ~/workspace/candidate-selection-tutorial/assignments/assignment1/exercise/src 20 | python understand_data.py --input /home/sigir/workspace/candidate-selection-tutorial/finished-product/data/news-aggregator-dataset/newsCorpora.csv 21 | ~~~ 22 | 23 | 24 | ## Building the Search Index 25 | Now that we have gotten a glimpse of the dataset we will proceed further to build out a simple search index on top of Solr which we can query. 26 | 27 | The goal here is make a baseline index that we will use to compare with another index that we will be building in the assignment later. 28 | 29 | We will be indexing the following fields in the search index. To get a broader understanding we will be using the minimal tokenization and analysis functionality from Solr. In **Assignment 0** when we setup Solr we made use of [**Solr Schema API**](https://lucene.apache.org/solr/guide/6_6/schema-api.html) to create a [**Dynamic Field**](https://lucene.apache.org/solr/guide/6_6/dynamic-fields.html) for storing our index fields. This will allow our indexed fields from the dataset to be tokenized and be searchable on the tokens. 30 | 31 | In the first command we define a simple **TextField** which is the type of field we want our data to be present in. 32 | 33 | In the next command we add a **dynamic field** as we prefix all fields from the dataset with \_news\_ prefix and declare it of type **simple_indexed_text** which we defined previously. 34 | 35 | 36 | ~~~ 37 | curl -X POST -H 'Content-type:application/json' --data-binary '{ 38 | "add-field-type" : { 39 | "name":"simple_indexed_text", 40 | "class":"solr.TextField", 41 | "positionIncrementGap":"100", 42 | "analyzer" : { 43 | "tokenizer":{ 44 | "class":"solr.WhitespaceTokenizerFactory" } 45 | }} 46 | }' http://localhost:8983/solr/simpleindex/schema 47 | 48 | curl -X POST -H 'Content-type:application/json' --data-binary '{ 49 | "add-dynamic-field":{ 50 | "name":"_news_*", 51 | "type":"simple_indexed_text", 52 | "indexed":true, 53 | "stored":true } 54 | }' http://localhost:8983/solr/simpleindex/schema 55 | 56 | ~~~ 57 | 58 | We will start by building a very basic document with prefix \_news\_ and utilizing **pysolr** for batch indexing the documents. 59 | 60 | As next steps open **simple_index.py** and complete the function for creating a document. This involves completing two steps 61 | 62 | * Writing the function for creating the document. **Pysolr** expects a dictionary containing all the fields to be put in the index. A special field with name **id** representing the unique id must be added. 63 | The fields to be added must be prefixed with \_news\_ to allow use of our custom TextField type. 64 | 65 | * The second task involves writing code for adding a document to the solr index. Frequent additions can be slow, consider utilizing batched addition of documents into Solr. 66 | 67 | Refer to [**pysolr documentation**](https://github.com/django-haystack/pysolr) 68 | 69 | Run the following command to start the indexing process. 
70 | 71 | ~~~ 72 | python simple_index.py --input /home/sigir/workspace/candidate-selection-tutorial/finished-product/data/news-aggregator-dataset/newsCorpora.csv 73 | ~~~ 74 | 75 | ## Query Rewriting and Searching 76 | In this task we will connect our middle tier and the frontend to the index. We will accept the query from the search front end, rewrite the query to search our index and send the results back to the frontend for display. 77 | 78 | As the next step, open the file **frontend/app.py**. This is our middle tier that serves the search requests and talks to the search backend. In this file you need to write the **GET** method of the **SearchSimpleIndex** class. 79 | 80 | * **Match All Query** - This query will be useful for serving queries with no keywords. Refer to [**solr documentation**](http://lucene.apache.org/solr/quickstart.html#searching) on how to construct it. 81 | * **Text Based Query** - For queries with keywords we will make use of the catch-all field in Solr. All content is indexed into a predefined "catch-all" \_text\_ field to enable single-field search that includes all fields' content. The query should take the following form: 82 | 83 | ~~~ 84 | Query: la times 85 | Tokens: ['la', 'times'] 86 | Generated Query: _text_:la AND _text_:times 87 | ~~~ 88 | 89 | ### Running the server 90 | To see the search in action, follow the commands below: 91 | 92 | ~~~ 93 | cd frontend 94 | python app.py 95 | ~~~ 96 | 97 | This should run the simple index search server on [http://0.0.0.0:8080](http://0.0.0.0:8080). The page should look like the image below. Try out some queries to see if you are getting results back. 98 | 99 | Congratulations, you have completed Assignment 1! 100 | 101 | 102 | 103 | 104 | 105 | -------------------------------------------------------------------------------- /assignments/assignment0/README.md: -------------------------------------------------------------------------------- 1 | # Assignment 0! 2 | 3 | In this assignment we will be working to make sure our development environment has been set up completely. We will be working with open source systems; links and documentation for each of the parts are included for reference as well. 4 | 5 | We will also take a quick look at our open source dataset to familiarize ourselves with it. 6 | 7 | ## Building Blocks 8 | We look at the tools that will lay out the foundation for helping us build a full stack search engine capable of serving an open source dataset. 9 | 10 | ### Python 11 | 12 | ![Python Logo](https://www.python.org/static/community_logos/python-powered-w-200x80.png) 13 | 14 | We will be working through our hands-on tutorial utilizing [**Python 2.7**](https://www.python.org/download/releases/2.7/) as our primary coding language. Python has grown to become very mature and is very easy to use. There also exists a plethora of libraries contributed by academia, industry and enthusiasts. Along with good infrastructure support there are also very well documented libraries for data science and machine learning. 15 | 16 | In our tutorial we will be using the following dependencies - 17 | 18 | * [**PySolr**](https://github.com/django-haystack/pysolr) is a lightweight Python wrapper for Apache Solr. It provides an interface that queries the server and returns results based on the query. 19 | 20 | * [**NLTK**](http://www.nltk.org/) is a leading platform for building Python programs to work with human language data. 
It provides easy-to-use interfaces to over 50 corpora and lexical resources such as WordNet, along with a suite of text processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, wrappers for industrial-strength NLP libraries, and an active discussion forum. 21 | 22 | * [**Web.py**](http://webpy.org/) is a web framework for Python that is as simple as it is powerful. web.py is in the public domain; you can use it for whatever purpose with absolutely no restrictions. 23 | 24 | 25 | * [**Pandas**](http://pandas.pydata.org/) is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. 26 | 27 | 28 | ### Solr 29 | 30 | ![Solr Logo](http://www.mcplusa.com/wp-content/uploads/2016/05/platform-solr-logo-330x200.png) 31 | 32 | [**Apache Solr**](http://lucene.apache.org/solr/) is an open source search platform built upon a Java library called Lucene. Solr is a popular search platform for Web sites because it can index and search multiple sites and return recommendations for related content based on the search query's taxonomy. 33 | 34 | 35 | ### Stanford CoreNLP 36 | [**Stanford CoreNLP**](https://stanfordnlp.github.io/CoreNLP/) provides a set of human language technology tools. It can give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases and syntactic dependencies, indicate which noun phrases refer to the same entities, indicate sentiment, extract particular or open-class relations between entity mentions, get the quotes people said, etc. 37 | 38 | ### spaCy NLP 39 | [**spaCy NLP**](https://spacy.io/) excels at large-scale information extraction tasks. It's written from the ground up in carefully memory-managed Cython. 40 | 41 | ### Ubuntu Linux 42 | 43 | ## Dataset 44 | We will be using an open source News Aggregator Dataset. It contains references to news pages collected from a web aggregator in the period from 10-March-2014 to 10-August-2014. The resources are grouped into clusters that represent pages discussing the same story. 45 | 46 | Full details about the dataset can be found at [**UCI Machine Learning Repository - News Aggregator Dataset**](http://archive.ics.uci.edu/ml/datasets/News+Aggregator#) 47 | 48 | #### Acknowledgement 49 | Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. 50 | 51 | 52 | 53 | ## Setup Instructions 54 | #### Download Ubuntu Virtual Machine 55 | 56 | The virtual machine is currently running Ubuntu and has all the dependencies set up for you. This is a good option if you do not want to corrupt or change the Python installation on your machine. 57 | 58 | 59 | * [**Virtual Box VM Download**](https://drive.google.com/open?id=0B1eBBrAnKVJlc1NQR3BVMkF2WjA) Gzip File 1.6G **or** 60 | * [**Virtual Box VM Download**](https://drive.google.com/open?id=0B1eBBrAnKVJlWFo0SlhHYmVidTg) 7z File 1.2G 61 | You will need to unzip this 7z file - click here to see instructions for [**Windows**](), [**Mac**](http://www.kekaosx.com/en/), [**Linux**](https://askubuntu.com/questions/341628/how-to-extract-tar-7z-files-from-command-line) 62 | 63 | 64 | * You will also need to download [**Virtual Box**](https://www.virtualbox.org/wiki/Downloads) to run the VM. 
65 | * Username: **sigir** Password: **sigir2017** 66 | 67 | Open Terminal and follow the commands. These commands do the following tasks 68 | 69 | * Clone the repository from github 70 | * Setup the Python Virtual Environment with all dependencies 71 | * Download and extract the dataset 72 | * Setup a solr instance and create index schema for the dataset 73 | 74 | ~~~ 75 | cd ~/ 76 | mkdir workspace 77 | cd workspace 78 | git clone https://github.com/candidate-selection-tutorial-sigir2017/candidate-selection-tutorial.git 79 | source ~/.sigir-venv/bin/activate 80 | cd candidate-selection-tutorial 81 | cd assignments/assignment0/excercise 82 | ./python-env.sh 83 | ./get_dataset.sh 84 | ./setup_solr.sh 85 | ~~~ 86 | 87 | Open another Terminal window and start the **Stanford NER** server. This will be used in subsequent assignments for Named Entity Recognition. 88 | 89 | ~~~ 90 | java -Djava.ext.dirs=./lib -cp /home/sigir/workspace/candidate-selection-tutorial/finished-product/data/stanford-ner/stanford-ner.jar edu.stanford.nlp.ie.NERServer -port 9199 -loadClassifier /home/sigir/workspace/candidate-selection-tutorial/finished-product/data/stanford-ner/classifiers/english.all.3class.caseless.distsim.crf.ser.gz -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false 91 | ~~~ 92 | 93 | Once you have completed the above steps your development environment has been setup and you are ready to proceed to Assignment 1! -------------------------------------------------------------------------------- /finished-product/src/frontend/app.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from itertools import groupby 3 | from operator import itemgetter 4 | import web 5 | import pysolr 6 | import string 7 | import json 8 | from nltk.corpus import stopwords 9 | from nltk.tokenize import word_tokenize 10 | from nltk.corpus import wordnet as wn 11 | from sner import Ner 12 | 13 | urls = ( 14 | '/', 'SimpleIndexSearchPage', 15 | '/entityAwareSearchPage', 'EntityAwareSearch', 16 | '/searchSimpleIndex', 'SearchSimpleIndex', 17 | '/searchEntityAwareIndex', 'SearchEntityAwareIndex' 18 | ) 19 | 20 | CATEGORY = {'b': 'Business', 'e': 'Entertainment', 't': 'Science and Technology', 'm': 'Health'} 21 | render = web.template.render('templates/', base='layout') 22 | SOLR_SIMPLEINDEX = pysolr.Solr('http://localhost:8983/solr/simpleindex') 23 | SOLR_ENTITYAWAREINDEX = pysolr.Solr('http://localhost:8983/solr/entityawareindex') 24 | STANFORD_NER_SERVER = Ner(host='localhost', port=9199) 25 | 26 | 27 | def get_web_input(web_input): 28 | draw = web_input['draw'] 29 | query = web_input['search[value]'] 30 | if len(query) == 0: 31 | query = '*:*' 32 | offset = web_input['start'] 33 | count = web_input['length'] 34 | return draw, query, offset, count 35 | 36 | 37 | def search_simple_index(query, offset, count, draw): 38 | """ 39 | This function is responsible for hitting the solr endpoint 40 | and returning the results back. 
41 | """ 42 | results = SOLR_SIMPLEINDEX.search(q=query, **{ 43 | 'start': int(offset), 44 | 'rows': int(count), 45 | 'cache': 'false' 46 | }) 47 | print("Saw {0} result(s) for query {1}.".format(len(results), query)) 48 | formatted_hits = [] 49 | for hit in results.docs: 50 | formatted_hits.append( 51 | [hit['_news_title'], hit['_news_publisher'], CATEGORY[hit['_news_category'][0]], hit['_news_url']]) 52 | response = {'draw': draw, 53 | 'recordsFiltered': results.hits, 54 | 'data': formatted_hits} 55 | web.header('Content-Type', 'application/json') 56 | return json.dumps(response) 57 | 58 | 59 | def search_entity_aware_index(query, offset, count, draw, qf, time_in_ms): 60 | """ 61 | This function is responsible for hitting the solr endpoint 62 | and returning the results back. 63 | """ 64 | results = SOLR_ENTITYAWAREINDEX.search(q=query, **{ 65 | 'start': int(offset), 66 | 'rows': int(count), 67 | 'segmentTerminatedEarly': 'true', 68 | 'timeAllowed': time_in_ms, 69 | 'cache': 'false', 70 | 'qf': qf, 71 | 'pf': qf, 72 | 'debugQuery': 'true', 73 | 'defType': 'edismax', 74 | 'ps': 10 75 | }) 76 | print("Saw {0} result(s) for query {1}.".format(len(results), query)) 77 | formatted_hits = [] 78 | print results.debug 79 | for hit in results.docs: 80 | formatted_hits.append( 81 | [hit['_news_title'], hit['_news_publisher'], CATEGORY[hit['_news_category'][0]], hit['_news_url']]) 82 | response = {'draw': draw, 83 | 'recordsFiltered': results.hits, 84 | 'data': formatted_hits} 85 | web.header('Content-Type', 'application/json') 86 | return json.dumps(response) 87 | 88 | 89 | class SimpleIndexSearchPage: 90 | def GET(self): 91 | return render.simpleIndexSearchPage() 92 | 93 | 94 | class EntityAwareSearch: 95 | def GET(self): 96 | return render.entityAwareSearchPage() 97 | 98 | 99 | class SearchSimpleIndex: 100 | def GET(self): 101 | draw, query, offset, count = get_web_input(web_input=web.input()) 102 | 103 | if query == '*:*': 104 | return search_simple_index(query=query, offset=offset, count=count, draw=draw) 105 | 106 | clauses = [] 107 | for token in word_tokenize(query): 108 | clauses.append("+_text_:%s" % token) 109 | query = " AND ".join(clauses) 110 | return search_simple_index(query=query, offset=offset, count=count, draw=draw) 111 | 112 | 113 | class SearchEntityAwareIndex: 114 | def accumulate_tags(self, list_of_tuples): 115 | tokens, entities = zip(*list_of_tuples) 116 | recognised = defaultdict(set) 117 | duplicates = defaultdict(list) 118 | 119 | for i, item in enumerate(entities): 120 | duplicates[item].append(i) 121 | 122 | for key, value in duplicates.items(): 123 | for k, g in groupby(enumerate(value), lambda x: x[0] - x[1]): 124 | indices = list(map(itemgetter(1), g)) 125 | recognised[key].add(' '.join(tokens[index] for index in indices)) 126 | # recognised.pop('O', None) 127 | 128 | recognised = dict(recognised) 129 | ner_info = {} 130 | for key, value in recognised.iteritems(): 131 | ner_info[key] = list(value) 132 | return ner_info 133 | 134 | 135 | def get_synonyms(self, text): 136 | syn_set = [] 137 | for synset in wn.synsets(text): 138 | for item in synset.lemma_names(): 139 | syn_set.append(item) 140 | return syn_set 141 | 142 | 143 | def tokenize_text(self, text): 144 | # title = unicode(query, "utf-8") 145 | stop = stopwords.words('english') + list(string.punctuation) 146 | return [i for i in word_tokenize(text) if i not in stop] 147 | 148 | 149 | def build_clauses(self, prefix, tagged_segments): 150 | clauses = [] 151 | for tagged_segment in tagged_segments: 152 | tokens = 
self.tokenize_text(tagged_segment) 153 | if len(tokens) == 1: 154 | clauses.append("%s:%s" % (prefix, tokens[0])) 155 | else: 156 | clauses.append("%s:\"%s\"" % (prefix, " ".join(tokens))) 157 | return clauses 158 | 159 | 160 | def GET(self): 161 | draw, query, offset, count = get_web_input(web_input=web.input()) 162 | 163 | if query == '*:*': 164 | return search_entity_aware_index(query=query, offset=offset, count=count, 165 | draw=draw, qf='_text_^1', time_in_ms=100) 166 | 167 | # Utilize entity tagger to give out entities and remove unwanted tags 168 | entity_tags = STANFORD_NER_SERVER.get_entities(query) 169 | entity_tags = self.accumulate_tags(entity_tags) 170 | print 'Entity tags for query - %s, %s' % (query, entity_tags) 171 | 172 | clauses = [] 173 | for entity_tag, tagged_segments in entity_tags.iteritems(): 174 | if entity_tag == 'PERSON': 175 | clauses.extend(self.build_clauses("_news_title_person", tagged_segments)) 176 | elif entity_tag == 'LOCATION': 177 | clauses.extend(self.build_clauses("_news_title_location", tagged_segments)) 178 | elif entity_tag == 'ORGANIZATION': 179 | clauses.extend(self.build_clauses("_news_title_organization", tagged_segments)) 180 | clauses.extend(self.build_clauses("_news_title_publisher", tagged_segments)) 181 | else: 182 | clauses.extend(self.build_clauses("_news_title", tagged_segments)) 183 | 184 | query = " AND ".join(clauses) 185 | qf = '_news_title_person^10 _news_title_organization^5 _news_title_location^100 _news_title^2.0 _news_publisher^10.0' 186 | 187 | return search_entity_aware_index(query=query, offset=offset, count=count, 188 | draw=draw, qf=qf, time_in_ms=250) 189 | 190 | 191 | if __name__ == "__main__": 192 | app = web.application(urls, globals()) 193 | app.run() 194 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------