├── ArticleDataSample.py ├── HmmArticle.py ├── LICENSE ├── Logger.py ├── README.md ├── SummaryCreator.py ├── data ├── LICENSE ├── get_pdfs.py ├── talksumm_papers_urls.txt └── talksumm_summaries.zip ├── example ├── json │ └── Detecting Egregious Conversations between Customers and Virtual Agents.json └── transcript │ └── Detecting Egregious Conversations between Customers and Virtual Agents.txt ├── prepare_data_for_hmm.py ├── requirements.txt ├── summarize.py ├── util.py ├── viterbi.py └── w2v_utils.py /ArticleDataSample.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from enum import Enum 4 | 5 | 6 | class CommonSectionNames(Enum): 7 | """ 8 | Notice - these string values should match the strings in the sections_info files (created by prepare_data_for_hmm) 9 | """ 10 | INTRO = "Introduction" 11 | RELATED = "Related Work" 12 | ACK = "Acknowledgments" 13 | 14 | 15 | class ArticleDataSample: 16 | """ 17 | A class for reading the files of a single data sample 18 | """ 19 | def __init__(self, 20 | transcript_fname, 21 | paper_text_fname, 22 | sections_info_fname, 23 | section_per_sent_fname, 24 | transcript_json_fname=None, 25 | paper_json_fname=None, 26 | alignment_json_fname=None 27 | ): 28 | """ 29 | The last 3 arguments are obsolete (they were used to represent a labeled data sample, as we had 30 | experimented with few manually-labeled alignments between paper sentences and speech transcript). 31 | """ 32 | 33 | self.transcript_jsn = None 34 | self.paper_jsn = None 35 | self.alignment_jsn = None 36 | self.transcript_sents = None 37 | self.paper_sents = None 38 | self.sections_sent_indices = {} 39 | self.section_per_sent = [] 40 | 41 | if transcript_json_fname: 42 | with open(transcript_json_fname, encoding='utf-8') as in_file: 43 | self.transcript_jsn = json.load(in_file) 44 | 45 | if paper_json_fname: 46 | with open(paper_json_fname, encoding='utf-8') as in_file: 47 | self.paper_jsn = json.load(in_file) 48 | 49 | if alignment_json_fname: 50 | with open(alignment_json_fname, encoding='utf-8') as in_file: 51 | self.alignment_jsn = json.load(in_file) 52 | 53 | if transcript_fname: 54 | with open(transcript_fname) as in_file: 55 | self.transcript_sents = [sent.rstrip('\n') for sent in in_file] 56 | 57 | transcript_word_num = 0 58 | for sent in self.transcript_sents: 59 | # the sentences are not tokenized, but by counting spaces we can get approximately the number of 60 | # words (we don't need exact number here) 61 | transcript_word_num += sent.count(' ') + 1 62 | self.transcript_word_num = transcript_word_num 63 | 64 | if paper_text_fname: 65 | with open(paper_text_fname) as in_file: 66 | self.paper_sents = [sent.rstrip('\n') for sent in in_file] 67 | 68 | # this function handles the case that sections_info_fname is None 69 | self.__read_sections_info_file(sections_info_fname) 70 | 71 | # this function handles the case that section_per_sent_fname is None 72 | self.__read_section_per_sent_file(section_per_sent_fname) 73 | 74 | @staticmethod 75 | def __jsn_to_single_list(jsn): 76 | """ 77 | method for reading transcript.json and alignment.json which share the same structure 78 | """ 79 | keys = jsn.keys() 80 | # convert the keys from string to int (these keys are slide indices) 81 | # notice that the slide indices do not necessarily start from 0 82 | slide_indices_sorted = [int(key) for key in keys] 83 | # sort 84 | slide_indices_sorted.sort() 85 | 86 | # this list will hold the output list 87 | out_list = [] 88 | 89 | for 
slide_i in slide_indices_sorted: 90 | slide_i_str = str(slide_i) 91 | # per slide, there is a list 92 | cur_list = jsn[slide_i_str] 93 | out_list.extend(cur_list) 94 | 95 | return out_list 96 | 97 | def get_transcript_sentences(self, punctuated: bool): 98 | if punctuated: 99 | if self.transcript_sents: 100 | transcript_sents = self.transcript_sents.copy() 101 | else: 102 | raise Exception("transcript_sents was not initialized") 103 | else: 104 | if self.transcript_jsn: 105 | transcript_sents = self.__jsn_to_single_list(self.transcript_jsn) 106 | else: 107 | raise Exception("transcript_jsn was not initialized") 108 | 109 | num_sents = len(transcript_sents) 110 | print("total number of sentences in the transcript: {}".format(num_sents)) 111 | return transcript_sents 112 | 113 | def get_ground_truth_sent_ids(self): 114 | if not self.alignment_jsn: 115 | raise Exception("alignment_jsn was not initialized") 116 | 117 | gt_sent_ids = self.__jsn_to_single_list(self.alignment_jsn) 118 | num_sents = len(gt_sent_ids) 119 | print("total number of ground-truth sentences: {}".format(num_sents)) 120 | return gt_sent_ids 121 | 122 | def __subsection_index_to_tuple(self, str_index): 123 | """ 124 | converts subsection index (a string) to tuple of ints 125 | """ 126 | split = str_index.split('.') 127 | # omit the last element as it is empty (since the string ends with '.') 128 | split = split[:-1] 129 | # string -> int 130 | split = [int(num) for num in split] 131 | tup = tuple(split) 132 | return tup 133 | 134 | def __tuple_to_subsection_index(self, tup): 135 | """ 136 | converts a tuple of ints to subsection index (a string) 137 | """ 138 | subsection_index = '.'.join('{}'.format(k) for k in tup) 139 | subsection_index += '.' 140 | return subsection_index 141 | 142 | def get_article_sentences_labeled(self, lower_case): 143 | """ 144 | returns 2 lists: 145 | 1. list of the article's sentences (lowercased in case lower_case is True) 146 | 2. list of the full indices of the sentences (section index, subsection index, and so on up to 147 | the sentence index). these full indices are strings (numbers are separated by period). 148 | the order of sentences is similar to the order in the article text. 
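for example, a full index of "2.1.0" denotes sentence 0 of subsection 2.1 (so get_section_idx() maps it to
section 2): the subsection part is the string produced by __tuple_to_subsection_index, and the trailing
number is the sentence's position within that subsection.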
149 | """ 150 | if not self.paper_jsn: 151 | raise Exception("paper_jsn was not initialized") 152 | 153 | jsn = self.paper_jsn 154 | keys = jsn.keys() 155 | # convert the keys from string to tuple of ints 156 | subsections_indices = [self.__subsection_index_to_tuple(key) for key in keys] 157 | # sort 158 | subsections_indices.sort() 159 | 160 | article_sentences = [] 161 | sentences_full_indices = [] 162 | 163 | for subsection_idx in subsections_indices: 164 | key = self.__tuple_to_subsection_index(subsection_idx) 165 | cur_sent_list = jsn[key] 166 | 167 | for sent_i, sent in enumerate(cur_sent_list): 168 | sent_full_idx = key + str(sent_i) 169 | sentences_full_indices.append(sent_full_idx) 170 | 171 | if lower_case: 172 | sent = sent.lower() 173 | 174 | article_sentences.append(sent) 175 | 176 | return article_sentences, sentences_full_indices 177 | 178 | def get_article_sentences_unlabeled(self, lower_case): 179 | if self.paper_sents: 180 | paper_sents = self.paper_sents.copy() 181 | if lower_case: 182 | paper_sents = [sent.lower() for sent in paper_sents] 183 | 184 | return paper_sents 185 | else: 186 | raise Exception("paper_sents was not initialized") 187 | 188 | def __read_sections_info_file(self, sections_info_fname): 189 | if sections_info_fname and os.path.isfile(sections_info_fname): 190 | with open(sections_info_fname) as in_file: 191 | lines = [line.rstrip('\n') for line in in_file] 192 | 193 | # parse each line, relying on the format used by write_section in prepare_data_for_hmm.py 194 | for line in lines: 195 | splt = line.split('\t') 196 | section_name = splt[0] 197 | start_i = int(splt[1]) 198 | num_sents = int(splt[2]) 199 | 200 | sent_indices = list(range(start_i, start_i + num_sents)) 201 | self.sections_sent_indices[section_name] = sent_indices 202 | 203 | # one ore more section names might be missing from the dictionary 204 | # due to missing file or lack of corresponding line in the file 205 | for section_name in CommonSectionNames: 206 | if section_name.value not in self.sections_sent_indices: 207 | self.sections_sent_indices[section_name.value] = [] 208 | 209 | def __read_section_per_sent_file(self, section_per_sent_fname): 210 | if section_per_sent_fname and os.path.isfile(section_per_sent_fname): 211 | with open(section_per_sent_fname) as in_file: 212 | self.section_per_sent = [line.rstrip('\n') for line in in_file] 213 | 214 | # sanity check - verify that section_per_sent and sections_sent_indices agree on the sentences indices 215 | # of the 3 common sections 216 | for section_name in CommonSectionNames: 217 | sent_indices = [sent_i for sent_i, section_title in enumerate(self.section_per_sent) if 218 | section_name.value == section_title] 219 | if sent_indices != self.sections_sent_indices[section_name.value]: 220 | print("--- mismatch: {}".format(section_name.value)) 221 | print("paper: {}".format(section_per_sent_fname)) 222 | print("sent_indices:") 223 | print(sent_indices) 224 | print("self.sections_sent_indices[{}]:".format(section_name.value)) 225 | print(self.sections_sent_indices[section_name.value]) 226 | 227 | def get_section_sent_indices(self, section_name: CommonSectionNames): 228 | if section_name.value in self.sections_sent_indices: 229 | return self.sections_sent_indices[section_name.value] 230 | else: 231 | raise Exception("unexpected section name: {}".format(section_name.value)) 232 | 233 | def get_paper_sent_num(self): 234 | return len(self.paper_sents) 235 | 236 | def get_paper_sents(self): 237 | return self.paper_sents.copy() 238 | 239 | def 
get_transcript_word_num(self): 240 | return self.transcript_word_num 241 | 242 | def get_section_per_sent(self): 243 | return self.section_per_sent.copy() 244 | -------------------------------------------------------------------------------- /HmmArticle.py: -------------------------------------------------------------------------------- 1 | import string 2 | import copy 3 | import itertools 4 | import os 5 | import numpy as np 6 | import nltk 7 | from nltk.tokenize import word_tokenize 8 | from enum import Enum 9 | from itertools import compress 10 | from tqdm import tqdm 11 | 12 | from ArticleDataSample import ArticleDataSample, CommonSectionNames 13 | from util import tprint, cosine_similarity 14 | from w2v_utils import read_pretrained_w2v 15 | from viterbi import viterbi 16 | 17 | 18 | class HmmAlgo(Enum): 19 | VITERBI_0 = 0 20 | DUMMY = 1 # for debugging 21 | 22 | 23 | class PredictedSeqInfoKey(Enum): 24 | """ 25 | keys of dictionary containing information of the HMM's predicted sequence of sentences 26 | """ 27 | SENT_I = "Sent i" 28 | BACKGROUND = "Backg" 29 | SENT_FULL_ID = "Sent ID" 30 | SENT_TEXT = "Sent Text" 31 | DURATION = "Duration" 32 | SPOKEN_WORDS = "Spoken words" 33 | IS_GROUND_TRUTH = "GT" 34 | 35 | @staticmethod 36 | def get_columns_order(labeled_data=False, include_background=False): 37 | """ 38 | returns a list of keys describing the values to be printed, and their order 39 | """ 40 | key_list = [] 41 | key_list.append(PredictedSeqInfoKey.SENT_I.value) 42 | if include_background: 43 | key_list.append(PredictedSeqInfoKey.BACKGROUND.value) 44 | if labeled_data: 45 | key_list.append(PredictedSeqInfoKey.SENT_FULL_ID.value) 46 | key_list.append(PredictedSeqInfoKey.SENT_TEXT.value) 47 | key_list.append(PredictedSeqInfoKey.DURATION.value) 48 | key_list.append(PredictedSeqInfoKey.SPOKEN_WORDS.value) 49 | if labeled_data: 50 | key_list.append(PredictedSeqInfoKey.IS_GROUND_TRUTH.value) 51 | 52 | return key_list 53 | 54 | 55 | class HmmArticleConfig: 56 | """ 57 | configuration of HmmArticle 58 | """ 59 | 60 | def __init__(self, 61 | word_embed_fname: str, 62 | labeled_data_mode: bool): 63 | """ 64 | some configuration parameters have no default value - they are must be passed in the constructor 65 | the other configuration parameters may be set after instantiation. 66 | """ 67 | 68 | # parameters with no default values 69 | self.word_embed_fname = word_embed_fname 70 | self.labeled_data_mode = labeled_data_mode 71 | 72 | # parameters with default values 73 | self.section_id_intro = 0 74 | self.section_id_related_work = None 75 | self.similarity_fname = None 76 | self.stay_prob = None # if None, stay_prob will be determined heuristically as a function of paper & transcript lengths 77 | self.auto_stay_prob_first_approach = True # selection between 2 approaches of auto-defining stay_prob 78 | self.trans_prob_decay = 0.75 79 | self.emis_prob_subtruct_min_factor = 0.8 80 | self.allow_backward_steps = True # allow transitioning from one sentence to an earlier one 81 | # factor for making the backward steps less probable than forward steps. 82 | # this parameter is relevant only if allow_backward_steps is True 83 | self.backward_prob_factor = 2 84 | # we tried to use "background" to model those parts in the talk where the speaker utters words that are 85 | # unrelated to any sentence in the paper (like in Malmaud et al., www.cs.ubc.ca/~murphyk/Papers/naacl15.pdf). 
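# (when backg_stay_prob is set, HmmArticle doubles the state space: each paper sentence gets a "foreground"
# copy and a "background" copy, the background copies emit words according to a distribution estimated from
# backg_word_count_fname, and backg_stay_prob is the probability of keeping the current background value at
# each transition - see prepare_transition_prob and get_backg_distribution.)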
86 | # however, better results were obtained without using background 87 | self.backg_stay_prob = None # None means disable background 88 | self.backg_word_count_fname = None 89 | self.lower_case = True 90 | self.remove_stop_words = True 91 | self.hmm_algo = HmmAlgo.VITERBI_0 92 | # transcript_word_level_mode=True means that each time-step corresponds to a single spoken word of the 93 | # transcript - this is the mode which we describe in our paper. we have also tried "sentence-level" mode, in 94 | # which each time-step corresponds to a sentence in the transcript. for this, we used a pre-trained 95 | # punctuation-restoration model in order to split the transcript into sentences. however, better results were 96 | # obtained with the word-level mode. 97 | self.transcript_word_level_mode = True 98 | self.sent_sent_similarity_wordwise = True # releavnt only when transcript_word_level_mode is False 99 | self.debug_mode = False 100 | self.wmd = False # Word Mover's Distance 101 | 102 | def print_configuration(self): 103 | print("HmmArticleConfig:") 104 | for item in vars(self).items(): 105 | print("%s: %s" % item) 106 | 107 | 108 | class HmmArticle: 109 | """ 110 | Given a data sample (article & transcript), this class prepares the HMM's probabilities and 111 | runs the Viterbi algorithm to obtain a predicted sequence of hidden states, i.e. paper sentences 112 | """ 113 | 114 | def __init__(self, article_data_sample: ArticleDataSample, cfg: HmmArticleConfig): 115 | 116 | self.article_data_sample = article_data_sample 117 | self.cfg = copy.deepcopy(cfg) 118 | 119 | self.using_background = (self.cfg.backg_stay_prob is not None) 120 | self.w2v = {} 121 | self.w2v_mean = None 122 | self.w2v_dim = 0 123 | self.transcript_tokens = None 124 | self.transcript_ids = [] 125 | self.transcript_sents = [] 126 | self.id2word = [] 127 | self.word2id = {} 128 | self.article_sentences = [] 129 | self.sentences_full_indices = [] 130 | self.section_idx_per_sentence = [] 131 | # this dict will include also keys of Related Work section (which we omit), since the 132 | # reference summary might include sentences from this section 133 | self.full_index_to_sentence = {} 134 | self.intro_sent_indices = [] 135 | # we exclude the sentences of Related Work and Acknowledgments sections 136 | self.excluded_sent_indices = {} 137 | self.article_all_sent_vecs = [] 138 | self.transcript_all_sent_vecs = [] 139 | self.start_prob = None 140 | self.transition_prob = None 141 | self.emission_prob = None 142 | self.model = None 143 | self.observed_seq = None 144 | self.predicted_seq_info = [] 145 | # Here we will store the duration of each sentence, i.e. number of time-steps in which the sentence was 146 | # chosen by the Viterbi algorithm. This models the number of words uttered by the speaker to describe the 147 | # sentence, and can be used as importance score. 
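# after predict(), get_summary_sent_indices(duration_thresh) keeps the sentences whose duration reaches the
# threshold, and create_durations_file() writes one duration per paper sentence (re-inserting zeros for the
# excluded Related Work / Acknowledgments sentences).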
148 | self.durations = None 149 | self.print_predicted_sentences = False 150 | # word count from external corpus - for background word distribution 151 | self.backg_word_count = {} 152 | self.warnings = [] 153 | 154 | if self.cfg.remove_stop_words: 155 | self.stop_words = self.get_stop_words() 156 | print('the following stop words and punctuations will be removed from article text and transcript:') 157 | print(self.stop_words) 158 | 159 | self.parse_transcript() 160 | 161 | self.process_article_sentences() 162 | 163 | self.n_article_sentences = len(self.article_sentences) 164 | self.n_states = 2 * self.n_article_sentences if self.using_background else self.n_article_sentences 165 | 166 | if self.cfg.transcript_word_level_mode: 167 | self.n_observations = len(self.id2word) 168 | else: 169 | self.n_observations = len(self.transcript_sents) 170 | 171 | print("n_observations: {}".format(self.n_observations)) 172 | print("n_article_sentences: {}".format(self.n_article_sentences)) 173 | print("n_states: {}".format(self.n_states)) 174 | 175 | if self.cfg.allow_backward_steps: 176 | # setting it to n_article_sentences means that the probability will be distributed all the way backward 177 | # up to the first sentence. 178 | # we have also tried smaller values, i.e. limiting how far the backward-transition can be 179 | self.max_backward_steps = self.n_article_sentences 180 | else: 181 | self.max_backward_steps = 0 182 | 183 | if self.using_background: 184 | self.read_backg_word_count_file(self.cfg.backg_word_count_fname) 185 | 186 | self.hmm_probabilities_init() 187 | 188 | if self.cfg.labeled_data_mode: 189 | self.gt_unique_sent_ids = set(self.article_data_sample.get_ground_truth_sent_ids()) 190 | 191 | # check if there is a ground-truth sentence from the Related Work section which was omitted 192 | if self.cfg.section_id_related_work is not None: 193 | for gt_sent_id in self.gt_unique_sent_ids: 194 | if self.get_section_idx(gt_sent_id) == self.cfg.section_id_related_work: 195 | warning = "WARNING: Related Work section ({}) was omitted but there is a ground-truth sentence ({}) from this section".format( 196 | self.cfg.section_id_related_work, gt_sent_id) 197 | self.warnings.append(warning) 198 | print(warning) 199 | else: 200 | self.gt_unique_sent_ids = {} 201 | 202 | @staticmethod 203 | def get_stop_words(): 204 | stop_words = set(nltk.corpus.stopwords.words("english")) 205 | punct = set(string.punctuation) 206 | stop_words.update(punct) 207 | return stop_words 208 | 209 | @staticmethod 210 | def get_section_idx(full_index): 211 | """ 212 | extracts the section index out of full index (e.g.: 3.0.1 --> 3) 213 | """ 214 | split = full_index.split('.', maxsplit=1) 215 | section_idx = int(split[0]) 216 | return section_idx 217 | 218 | def process_article_sentences(self): 219 | if self.cfg.labeled_data_mode: 220 | self.article_sentences, self.sentences_full_indices = self.article_data_sample.get_article_sentences_labeled( 221 | self.cfg.lower_case) 222 | 223 | orig_num_of_sents = len(self.article_sentences) 224 | 225 | section_idx_per_sentence = [] 226 | 227 | for sent_i, full_index in enumerate(self.sentences_full_indices): 228 | section_idx = self.get_section_idx(full_index) 229 | section_idx_per_sentence.append(section_idx) 230 | 231 | if section_idx == self.cfg.section_id_intro: 232 | self.intro_sent_indices.append(sent_i) 233 | 234 | self.full_index_to_sentence[full_index] = self.article_sentences[sent_i] 235 | 236 | bool_filter = [section_idx != self.cfg.section_id_related_work for section_idx 
in section_idx_per_sentence] 237 | 238 | # unlabeled data 239 | else: 240 | self.article_sentences = self.article_data_sample.get_article_sentences_unlabeled(self.cfg.lower_case) 241 | 242 | self.intro_sent_indices = self.article_data_sample.get_section_sent_indices(CommonSectionNames.INTRO) 243 | 244 | related_work_sent_indices = self.article_data_sample.get_section_sent_indices(CommonSectionNames.RELATED) 245 | ack_sent_indices = self.article_data_sample.get_section_sent_indices(CommonSectionNames.ACK) 246 | self.excluded_sent_indices = set(related_work_sent_indices + ack_sent_indices) 247 | 248 | orig_num_of_sents = len(self.article_sentences) 249 | 250 | bool_filter = [sent_i not in self.excluded_sent_indices for sent_i in range(orig_num_of_sents)] 251 | 252 | print("original number of article sentences: {}".format(orig_num_of_sents)) 253 | 254 | if self.cfg.debug_mode: 255 | desired_num_sentences = 5 256 | bool_filter = [False] * len(bool_filter) 257 | bool_filter[:desired_num_sentences] = [True] * desired_num_sentences 258 | 259 | self.intro_sent_indices = [0, 1] 260 | 261 | print("DEBUG mode: we take only the first {} sentences".format(desired_num_sentences)) 262 | 263 | self.article_sentences = list(compress(self.article_sentences, bool_filter)) 264 | if self.cfg.labeled_data_mode: 265 | self.sentences_full_indices = list(compress(self.sentences_full_indices, bool_filter)) 266 | self.section_idx_per_sentence = list(compress(section_idx_per_sentence, bool_filter)) 267 | 268 | num_of_sents = len(self.article_sentences) 269 | 270 | print("after removing sentences of Related Work section, number of article sentences is now: {}".format( 271 | num_of_sents)) 272 | 273 | # avoid empty intro_sent_indices 274 | if len(self.intro_sent_indices) == 0: 275 | dummy_num_intro_sents = min(20, num_of_sents) 276 | self.intro_sent_indices = list(range(dummy_num_intro_sents)) 277 | print("intro_sent_indices was empty. 
it was set to the first {} sentences".format(dummy_num_intro_sents)) 278 | 279 | def parse_transcript(self): 280 | # we use punctuated=True also in transcript_word_level_mode (transcript.json is actually obsolete) 281 | transcript_sents = self.article_data_sample.get_transcript_sentences(punctuated=True) 282 | 283 | num_sents = len(transcript_sents) 284 | 285 | for sent_i, sent in enumerate(transcript_sents): 286 | sent = sent.replace("%HESITATION", "") 287 | 288 | if self.cfg.lower_case: 289 | sent = sent.lower() 290 | 291 | # replace the sentence string with a list of its tokens 292 | word_list = word_tokenize(sent) 293 | if self.cfg.remove_stop_words: 294 | word_list = [word for word in word_list if word not in self.stop_words] 295 | transcript_sents[sent_i] = word_list 296 | 297 | # list of lists -> one list of all tokens 298 | self.transcript_tokens = list(itertools.chain.from_iterable(transcript_sents)) 299 | 300 | num_tokens = len(self.transcript_tokens) 301 | print("total number of tokens in the whole transcript: {}".format(num_tokens)) 302 | 303 | # the unique tokens are the vocabulary of the transcript 304 | self.id2word = list(set(self.transcript_tokens)) 305 | self.id2word.sort() 306 | print("vocabulary size: {}".format(len(self.id2word))) 307 | # print(self.id2word) 308 | 309 | # initialize word->id dictionary 310 | for word_i, word in enumerate(self.id2word): 311 | self.word2id[word] = word_i 312 | 313 | transcript_ids_per_sent = [] 314 | 315 | for sent in transcript_sents: 316 | word_ids = [] 317 | for word in sent: 318 | word_ids.append(self.word2id[word]) 319 | 320 | transcript_ids_per_sent.append(word_ids) 321 | 322 | # list of lists -> one list of all token ids 323 | self.transcript_ids = list(itertools.chain.from_iterable(transcript_ids_per_sent)) 324 | 325 | self.transcript_sents = transcript_sents 326 | 327 | if self.cfg.transcript_word_level_mode: 328 | if self.cfg.debug_mode: 329 | self.observed_seq = np.array([0, 2, 1, 1, 2, 0]) 330 | else: 331 | self.observed_seq = np.asarray(self.transcript_ids) 332 | else: 333 | self.observed_seq = np.arange(num_sents) 334 | 335 | def read_backg_word_count_file(self, backg_word_count_fname): 336 | tprint("reading file: {}".format(backg_word_count_fname)) 337 | 338 | with open(backg_word_count_fname) as file: 339 | for line in file: 340 | word, count = line.split() 341 | self.backg_word_count[word] = int(count) 342 | 343 | tprint("done") 344 | 345 | def prepare_sent_vecs(self, sent_list): 346 | """ 347 | sent_list can be either a list of strings or a list of lists of tokens 348 | """ 349 | # tokenize if needed 350 | if type(sent_list[0]) == str: 351 | sent_list_tokens = [] 352 | for sent_i, sent in enumerate(sent_list): 353 | sent_list_tokens.append(word_tokenize(sent)) 354 | 355 | sent_list = sent_list_tokens 356 | 357 | # now sent_list is necessarily a list of lists of tokens 358 | 359 | all_sent_vecs = [] 360 | total_not_found = 0 361 | 362 | for sent_i, sent_tokens in enumerate(sent_list): 363 | sent_vecs = [] 364 | 365 | for word in sent_tokens: 366 | if self.cfg.remove_stop_words and word in self.stop_words: 367 | continue 368 | 369 | if word in self.w2v: 370 | sent_vecs.append(self.w2v[word]) 371 | else: 372 | print("word not found: {}".format(word)) 373 | total_not_found += 1 374 | 375 | if not sent_vecs: 376 | sent_str = ' '.join(sent_tokens) 377 | warning = "WARNING: all words not found for sentence: {}".format(sent_str) 378 | # raise Exception(warning) 379 | self.warnings.append(warning) 380 | print(warning) 381 | 382 
| sent_vecs.append(self.w2v_mean) 383 | 384 | all_sent_vecs.append(sent_vecs) 385 | 386 | print("total number of times word not found: {}".format(total_not_found)) 387 | 388 | return all_sent_vecs 389 | 390 | @staticmethod 391 | def word_sent_similarity(word_vec, sent_vecs): 392 | """ 393 | sent_vecs: a list of the vectors of the sentence's words 394 | """ 395 | sent_len = len(sent_vecs) 396 | similarities = np.zeros(sent_len) 397 | 398 | for vec_i, vec in enumerate(sent_vecs): 399 | cosine_sim = cosine_similarity(vec, word_vec) 400 | 401 | # obtain positive similarity 402 | sim = np.exp(cosine_sim) 403 | 404 | similarities[vec_i] = sim 405 | 406 | max_sim = np.max(similarities) 407 | return max_sim 408 | 409 | def sent_sent_similarity(self, sent1_vecs, sent2_vecs): 410 | """ 411 | sent1_vecs: a list of the word vectors of the 1st sentence 412 | sent2_vecs: same, for the 2nd sentence 413 | """ 414 | if self.cfg.sent_sent_similarity_wordwise: 415 | similarities = [] 416 | for word_vec in sent1_vecs: 417 | similarities.append(self.word_sent_similarity(word_vec, sent2_vecs)) 418 | 419 | max_sim = max(similarities) 420 | return max_sim 421 | 422 | # cosine similarity between the mean vectors 423 | else: 424 | sent1_mean_w2v = np.mean(sent1_vecs, 0) 425 | sent2_mean_w2v = np.mean(sent2_vecs, 0) 426 | 427 | cosine_sim = cosine_similarity(sent1_mean_w2v, sent2_mean_w2v) 428 | 429 | sim = np.exp(cosine_sim) 430 | return sim 431 | 432 | def prepare_start_prob(self): 433 | """ 434 | prepares the start probabilities 435 | """ 436 | 437 | # we set start probability as uniform over the sentences in the Introduction section 438 | 439 | start_prob = np.zeros(self.n_article_sentences) 440 | 441 | num_sents_in_intro = len(self.intro_sent_indices) 442 | prob = 1 / num_sents_in_intro 443 | 444 | for sent_i in self.intro_sent_indices: 445 | start_prob[sent_i] = prob 446 | 447 | if not self.using_background: 448 | self.start_prob = start_prob 449 | else: 450 | self.start_prob = np.zeros(self.n_states) 451 | # we set probability of 1 to start with background==1 452 | self.start_prob[self.n_article_sentences:] = start_prob 453 | 454 | def prepare_transition_prob(self): 455 | """ 456 | prepares the transition probabilities matrix 457 | """ 458 | stay_prob = self.cfg.stay_prob 459 | if stay_prob is None: 460 | if not self.cfg.transcript_word_level_mode: 461 | raise Exception("None value for stay_prob is supported in transcript_word_level_mode only") 462 | # notice that in some very few cases, this ratio is larger than 1, we will handle this 463 | paper_trans_len_ratio = self.n_article_sentences / len(self.observed_seq) 464 | 465 | if self.cfg.auto_stay_prob_first_approach: 466 | # with this definition, the resulting stay_prob is around 0.3 in average 467 | factor = 3 468 | stay_prob = (1 - paper_trans_len_ratio) / factor 469 | else: 470 | # another approach which we tried, it achieved good results as well 471 | factor = 7 472 | stay_prob = 1 - (factor * paper_trans_len_ratio) 473 | 474 | stay_prob = max(stay_prob, 0.1) 475 | stay_prob = round(stay_prob, 2) 476 | 477 | transition_prob = np.zeros((self.n_article_sentences, self.n_article_sentences)) 478 | 479 | leave_prob = 1 - stay_prob 480 | 481 | print("stay_prob: {:.3}".format(stay_prob)) 482 | 483 | # helper vector for probability decay 484 | helper_vec = np.ones(self.n_article_sentences, dtype=np.float) 485 | for i in range(1, self.n_article_sentences): 486 | helper_vec[i] = self.cfg.trans_prob_decay * helper_vec[i - 1] 487 | 488 | for state_i in 
range(self.n_article_sentences): 489 | # notice that when state_i == self.n_article_sentences - 1, and if backward steps are not allowed, 490 | # then transition_prob[self.n_article_sentences - 1, :] will not sum up to 1. 491 | # even though there is nowhere to go on from the last state, we don't set the stay probability 492 | # to 1 here, as the viterbi algorithm exploits it and pushes to reach the last state ASAP. 493 | 494 | transition_prob[state_i, state_i] = stay_prob 495 | 496 | n_following_states = self.n_article_sentences - state_i - 1 497 | n_previous_states = min(state_i, self.max_backward_steps) 498 | 499 | right_vec = np.copy(helper_vec[: n_following_states]) 500 | left_vec = np.flip(np.copy(helper_vec[: n_previous_states])) / self.cfg.backward_prob_factor 501 | 502 | # normalization factor such that sum(right_vec) + sum(left_vec) will sum up to leave_prob 503 | normalization_factor = (sum(right_vec) + sum(left_vec)) / leave_prob 504 | 505 | right_vec /= normalization_factor 506 | left_vec /= normalization_factor 507 | 508 | transition_prob[state_i, (state_i + 1):] = right_vec 509 | transition_prob[state_i, (state_i - n_previous_states): state_i] = left_vec 510 | 511 | if not self.using_background: 512 | self.transition_prob = transition_prob 513 | else: 514 | self.transition_prob = np.zeros((self.n_states, self.n_states)) 515 | 516 | # the part of the matrix in which the background stays the same 517 | # in this case we multiply the sentence-transition probabilities by backg_stay_prob 518 | backg_stays_block = self.cfg.backg_stay_prob * transition_prob 519 | 520 | # the part of the matrix in which the background changes 521 | # in this case we multiply the sentence-transition probabilities by (1 - self.cfg.backg_stay_prob) 522 | backg_changes_block = (1 - self.cfg.backg_stay_prob) * transition_prob 523 | 524 | # top-left block: background stays at 0 525 | self.transition_prob[:self.n_article_sentences, :self.n_article_sentences] = backg_stays_block 526 | # bottom-right block: background stays at 1 527 | self.transition_prob[self.n_article_sentences:, self.n_article_sentences:] = backg_stays_block 528 | # bottom-left block: background changes from 1 to 0 529 | self.transition_prob[self.n_article_sentences:, :self.n_article_sentences] = backg_changes_block 530 | # top-right block: background changes from 0 to 1 531 | self.transition_prob[:self.n_article_sentences:, self.n_article_sentences:] = backg_changes_block 532 | 533 | def get_backg_distribution(self): 534 | if self.cfg.transcript_word_level_mode: 535 | # if a word didn't appear in the external text, we set it's count to 1 536 | word_dist = np.ones(self.n_observations) 537 | 538 | for word_i in range(self.n_observations): 539 | word = self.id2word[word_i] 540 | 541 | if word in self.backg_word_count: 542 | word_dist[word_i] = self.backg_word_count[word] 543 | 544 | word_dist /= np.sum(word_dist) 545 | 546 | return word_dist 547 | 548 | else: 549 | raise Exception("currently background is only supported in transcript_word_level_mode") 550 | 551 | def prepare_emission_prob(self): 552 | """ 553 | prepares the emission probabilities matrix 554 | """ 555 | if self.cfg.similarity_fname and os.path.isfile(self.cfg.similarity_fname): 556 | tprint("loading similarity file: {}".format(self.cfg.similarity_fname)) 557 | emission_prob = np.load(self.cfg.similarity_fname) 558 | tprint("done") 559 | 560 | else: 561 | is_glove = not self.cfg.word_embed_fname[-3:] == 'bin' 562 | 563 | self.w2v, self.w2v_mean = 
read_pretrained_w2v(self.cfg.word_embed_fname, is_glove=is_glove) 564 | self.w2v_dim = self.w2v_mean.shape[0] 565 | tprint("w2v dimension: {}".format(self.w2v_dim)) 566 | 567 | self.article_all_sent_vecs = self.prepare_sent_vecs(self.article_sentences) 568 | 569 | if not self.cfg.transcript_word_level_mode: 570 | self.transcript_all_sent_vecs = self.prepare_sent_vecs(self.transcript_sents) 571 | 572 | emission_prob = np.zeros((self.n_article_sentences, self.n_observations)) 573 | 574 | tprint("preparing similarities for emission probabilities...") 575 | # prepare word vectors in case of word level mode 576 | if self.cfg.transcript_word_level_mode: 577 | word_vecs = [] 578 | 579 | for observation_i in range(self.n_observations): 580 | word = self.id2word[observation_i] 581 | if word in self.w2v: 582 | word_vec = self.w2v[word] 583 | else: 584 | # word_vec = self.w2v[""] 585 | word_vec = self.w2v_mean 586 | 587 | word_vecs.append(word_vec) 588 | 589 | for state_i in tqdm(range(self.n_article_sentences)): 590 | for observation_i in range(self.n_observations): 591 | if self.cfg.transcript_word_level_mode: 592 | emission_prob[state_i, observation_i] = self.word_sent_similarity( 593 | word_vecs[observation_i], 594 | self.article_all_sent_vecs[state_i]) 595 | else: 596 | if not self.cfg.wmd: 597 | emission_prob[state_i, observation_i] = self.sent_sent_similarity( 598 | self.transcript_all_sent_vecs[observation_i], 599 | self.article_all_sent_vecs[state_i]) 600 | else: 601 | emission_prob[state_i, observation_i] = -self.w2v.wmdistance( 602 | self.transcript_sents[observation_i], 603 | self.article_sentences[state_i]) 604 | 605 | if self.cfg.similarity_fname: 606 | # save to file 607 | np.save(self.cfg.similarity_fname, emission_prob) 608 | tprint("created file: {}".format(self.cfg.similarity_fname)) 609 | 610 | # manipulate the similarities and normalize 611 | for state_i in range(self.n_article_sentences): 612 | 613 | if self.cfg.wmd: 614 | emission_prob[state_i, :] -= np.max(emission_prob[state_i, :]) 615 | 616 | # this works better than applying a second softmax 617 | if self.cfg.emis_prob_subtruct_min_factor != 0: 618 | min_val = np.min(emission_prob[state_i, :]) 619 | emission_prob[state_i, :] -= self.cfg.emis_prob_subtruct_min_factor * min_val 620 | 621 | # normalize the similarities to obtain probabilities 622 | emission_prob[state_i, :] /= np.sum(emission_prob[state_i, :]) 623 | 624 | if not self.using_background: 625 | self.emission_prob = emission_prob 626 | else: 627 | word_dist = self.get_backg_distribution() 628 | # for all sentences, the word distribution is set to word_dist 629 | backg_emission_prob = np.tile(word_dist, (self.n_article_sentences, 1)) 630 | 631 | self.emission_prob = np.concatenate((emission_prob, backg_emission_prob)) 632 | 633 | def hmm_probabilities_init(self): 634 | self.prepare_start_prob() 635 | self.prepare_transition_prob() 636 | self.prepare_emission_prob() 637 | 638 | if self.cfg.debug_mode: 639 | print("start_prob:") 640 | print(self.start_prob) 641 | print("transition_prob:") 642 | print(self.transition_prob) 643 | print("emission_prob:") 644 | print(self.emission_prob) 645 | 646 | def get_num_of_states(self): 647 | return self.n_states 648 | 649 | def get_num_of_article_sentences(self): 650 | return self.n_article_sentences 651 | 652 | def get_num_of_gt_sentences(self): 653 | """ 654 | returns the number of ground-truth sentences (the ones which are labeled as positives) 655 | """ 656 | if not self.cfg.labeled_data_mode: 657 | raise Exception("this 
method is unavailable for unlabeled sample") 658 | 659 | return len(self.gt_unique_sent_ids) 660 | 661 | def get_warnings(self): 662 | return self.warnings 663 | 664 | def state2sent(self, state_i): 665 | """ 666 | given state index, this function returns the corresponding sentence index 667 | (these indices are different only in case background is enabled) 668 | """ 669 | return state_i % self.n_article_sentences 670 | 671 | def state2backg(self, state_i): 672 | """ 673 | returns 1 if the given state is in the background, 0 otherwise 674 | """ 675 | return state_i // self.n_article_sentences 676 | 677 | def state2pair(self, state_i): 678 | """ 679 | given a state index, returns pair of (sentence index, background value) 680 | """ 681 | return self.state2sent(state_i), self.state2backg(state_i) 682 | 683 | def predict(self): 684 | """ 685 | runs the Viterbi algorithm to obtain a predicted sequence of hidden states, i.e. paper sentences 686 | """ 687 | tprint("predict...") 688 | 689 | if self.cfg.hmm_algo == HmmAlgo.VITERBI_0: 690 | predicted_path = viterbi(self.start_prob, 691 | self.transition_prob, 692 | self.emission_prob, 693 | self.observed_seq) 694 | 695 | elif self.cfg.hmm_algo == HmmAlgo.DUMMY: 696 | # for debugging - avoid waiting for prediction 697 | predicted_path = [20] * len(self.observed_seq) 698 | predicted_path[:3] = [10, 10, 12] 699 | else: 700 | raise Exception("unknown HMM algorithm") 701 | 702 | tprint("done") 703 | 704 | # if going backward is not allowed - validate it 705 | if not self.cfg.allow_backward_steps: 706 | for t in range(1, len(self.observed_seq)): 707 | assert (self.state2sent(predicted_path[t]) >= self.state2sent(predicted_path[t - 1])) 708 | 709 | log_prob = self.calc_log_prob(predicted_path, emission_prob_only=False) 710 | 711 | if self.using_background: 712 | foreg_pos = [self.state2backg(state_i) == 0 for state_i in predicted_path] 713 | backg_pos = [not bool_val for bool_val in foreg_pos] 714 | 715 | predicted_sents = list(compress(predicted_path, foreg_pos)) 716 | 717 | print('foreground count: {}'.format(len(predicted_path))) 718 | print('background count: {}'.format(sum(backg_pos))) 719 | else: 720 | predicted_sents = predicted_path 721 | 722 | unique_sent_indices = list(set(predicted_sents)) 723 | unique_sent_indices.sort() 724 | 725 | self.prepare_predicted_seq_info(predicted_path) 726 | 727 | print("predicted sequence summary:") 728 | for subseq_info in self.predicted_seq_info: 729 | sent_i = subseq_info[PredictedSeqInfoKey.SENT_I.value] 730 | backg = subseq_info[PredictedSeqInfoKey.BACKGROUND.value] 731 | if self.using_background: 732 | state_str = "({:4}, {})".format(sent_i, backg) 733 | else: 734 | state_str = "{:4}".format(sent_i) 735 | 736 | duration = subseq_info[PredictedSeqInfoKey.DURATION.value] 737 | 738 | print("{}: {:4}".format(state_str, duration)) 739 | 740 | if self.print_predicted_sentences: 741 | print("predicted sentences:") 742 | for sent_i in unique_sent_indices: 743 | print("sentence {}:".format(sent_i)) 744 | print(self.article_sentences[sent_i]) 745 | 746 | print("\nnum of predicted unique sentences: {}".format(len(unique_sent_indices))) 747 | 748 | return self.predicted_seq_info, log_prob 749 | 750 | def calc_log_prob(self, path, emission_prob_only): 751 | """ 752 | calculates the log-probability of a given path of hidden states 753 | """ 754 | log_prob = 0 755 | for t in range(len(path)): 756 | log_prob += np.log(self.emission_prob[path[t], self.observed_seq[t]]) 757 | 758 | if emission_prob_only: 759 | return log_prob 
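# full path score: log P(path, observations) = log start_prob[path[0]]
#   + sum_{t>=1} log transition_prob[path[t-1], path[t]]
#   + sum_t log emission_prob[path[t], observed_seq[t]]
# the emission term was accumulated above; the start and transition terms are added below.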
760 | 761 | log_prob += np.log(self.start_prob[path[0]]) 762 | 763 | for t in range(1, len(path)): 764 | log_prob += np.log(self.transition_prob[path[t - 1], path[t]]) 765 | 766 | return log_prob 767 | 768 | def prepare_predicted_seq_info(self, predicted_path): 769 | # will contain the indices where state was changed 770 | change_indices = [] 771 | prev_state = None 772 | all_subseq_info = [] 773 | observed_seq_len = len(self.observed_seq) 774 | self.durations = np.zeros(self.n_article_sentences, dtype=np.int) 775 | 776 | # collect the indices where state was changed 777 | for t in range(observed_seq_len): 778 | cur_state = predicted_path[t] 779 | if cur_state != prev_state: 780 | change_indices.append(t) 781 | prev_state = cur_state 782 | # this will aid in the next loop 783 | change_indices.append(observed_seq_len) 784 | 785 | # we start at the second index 786 | for i in range(1, len(change_indices)): 787 | t = change_indices[i] 788 | prev_t = change_indices[i - 1] 789 | 790 | cur_state_i = predicted_path[prev_t] 791 | cur_sent_i, cur_backg = self.state2pair(cur_state_i) 792 | if self.cfg.labeled_data_mode: 793 | cur_sent_id = self.sentences_full_indices[cur_sent_i] 794 | is_ground_truth = int(cur_sent_id in self.gt_unique_sent_ids) 795 | else: 796 | cur_sent_id = '' 797 | is_ground_truth = 0 798 | cur_sent_text = self.article_sentences[cur_sent_i] 799 | observed_subseq = self.observed_seq[prev_t:t] 800 | if self.cfg.transcript_word_level_mode: 801 | spoken_words_subseq = [self.id2word[word_i] for word_i in observed_subseq] 802 | spoken_words_str = ' '.join(spoken_words_subseq) 803 | else: 804 | spoken_sents = [' '.join(self.transcript_sents[tran_sent_i]) for tran_sent_i in observed_subseq] 805 | spoken_words_str = ' '.join(spoken_sents) 806 | 807 | duration = len(observed_subseq) 808 | 809 | cur_subseq_info = { 810 | PredictedSeqInfoKey.SENT_I.value: cur_sent_i, 811 | PredictedSeqInfoKey.BACKGROUND.value: cur_backg, 812 | PredictedSeqInfoKey.SENT_FULL_ID.value: cur_sent_id, 813 | PredictedSeqInfoKey.SENT_TEXT.value: cur_sent_text, 814 | PredictedSeqInfoKey.DURATION.value: duration, 815 | PredictedSeqInfoKey.SPOKEN_WORDS.value: spoken_words_str, 816 | PredictedSeqInfoKey.IS_GROUND_TRUTH.value: is_ground_truth 817 | } 818 | 819 | all_subseq_info.append(cur_subseq_info) 820 | 821 | # update durations if foreground 822 | if cur_backg == 0: 823 | # the same sentence might appear several times in the path with backg == 0 824 | self.durations[cur_sent_i] += duration 825 | 826 | self.predicted_seq_info = all_subseq_info 827 | 828 | def sent_ids_to_str(self, sent_ids): 829 | """ 830 | given a list of full indices of sentences, this method creates a string of 831 | the corresponding sentences, separated by newlines 832 | """ 833 | sentences = [self.full_index_to_sentence[sent_id] for sent_id in sent_ids] 834 | out_str = '\n'.join(sentences) + '\n' 835 | return out_str 836 | 837 | def get_summary_sent_indices(self, duration_thresh=1): 838 | if self.durations is None: 839 | raise Exception("you must call predict() before calling assess()") 840 | 841 | summary_sent_indices = [] 842 | for sent_i, duration in enumerate(self.durations): 843 | if duration >= duration_thresh: 844 | summary_sent_indices.append(sent_i) 845 | 846 | return summary_sent_indices 847 | 848 | def get_summary_sent_ids(self, duration_thresh=1): 849 | """ 850 | returns the full indices of the chosen sentences 851 | """ 852 | summary_sent_indices = self.get_summary_sent_indices(duration_thresh) 853 | summary_sent_ids = 
[self.sentences_full_indices[i] for i in summary_sent_indices] 854 | return summary_sent_ids 855 | 856 | def assess(self, duration_thresh=1): 857 | """ 858 | This function is relevant only for labeled data 859 | duration_thresh: sentences which were included in the predicted path, but with duration less 860 | than duration_thresh, will be excluded from the summary 861 | """ 862 | if not self.cfg.labeled_data_mode: 863 | raise Exception("you can call this function in labeled-data-mode only") 864 | 865 | print("duration_thresh = {}".format(duration_thresh)) 866 | summary_sent_ids = self.get_summary_sent_ids(duration_thresh) 867 | 868 | # print("summary_sent_ids:") 869 | # print(summary_sent_ids) 870 | 871 | num_gt_sentences = len(self.gt_unique_sent_ids) 872 | print("number of unique ground-truth sentences: {}".format(num_gt_sentences)) 873 | 874 | prediction_labels = [sent_id in self.gt_unique_sent_ids for sent_id in summary_sent_ids] 875 | # print("prediction_labels:") 876 | # print(prediction_labels) 877 | 878 | true_positives = sum(prediction_labels) 879 | 880 | precision = true_positives / len(prediction_labels) 881 | recall = true_positives / num_gt_sentences 882 | # avoid division by zero 883 | if precision + recall == 0: 884 | f1 = 0.0 885 | else: 886 | f1 = (2 * precision * recall) / (precision + recall) 887 | 888 | summary_len = len(summary_sent_ids) 889 | 890 | return precision, recall, f1, summary_len 891 | 892 | def get_summary_num_of_sents(self, duration_thresh=1): 893 | summary_sent_indices = self.get_summary_sent_indices(duration_thresh) 894 | summary_num_of_sents = len(summary_sent_indices) 895 | return summary_num_of_sents 896 | 897 | def get_durations_including_excluded_sents(self): 898 | """ 899 | combines zero values into the durations_vector at the locations of the excluded sentences 900 | """ 901 | durations = np.zeros(len(self.durations) + len(self.excluded_sent_indices), dtype=np.int) 902 | idx_reduced = 0 903 | idx_all = 0 904 | 905 | while idx_all < len(durations): 906 | if idx_all not in self.excluded_sent_indices: 907 | durations[idx_all] = self.durations[idx_reduced] 908 | idx_reduced += 1 909 | idx_all += 1 910 | 911 | assert idx_reduced == len(self.durations) 912 | 913 | return durations 914 | 915 | def create_durations_file(self, out_fname): 916 | durations = self.get_durations_including_excluded_sents() 917 | 918 | out_str = '\n'.join([str(val) for val in durations]) + '\n' 919 | 920 | with open(out_fname, 'w') as out_file: 921 | out_file.write(out_str) 922 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. 
We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". 
"Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. 
For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 
204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 
268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. 
But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 
387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. 
You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. 
"Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 
564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 
628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /Logger.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | class Logger(object): 5 | """ 6 | This class enables logging to both stdout and text file 7 | """ 8 | def __init__(self, logfile_name, terminal=sys.stdout): 9 | self.terminal = terminal 10 | self.log = open(logfile_name, "w") 11 | 12 | def write(self, message): 13 | self.terminal.write(message) 14 | self.log.write(message) 15 | 16 | def flush(self): 17 | self.terminal.flush() 18 | self.log.flush() 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TalkSumm 2 | 3 | This repository contains a dataset and related code for the ACL 2019 paper *[TalkSumm: A Dataset and Scalable Annotation Method for Scientific Paper Summarization Based on Conference Talks](https://www.aclweb.org/anthology/P19-1204)*. 4 | 5 | The dataset contains 1705 automatically-generated summaries of scientific papers from ACL, NAACL, EMNLP, SIGDIAL (2015-2018), and ICML (2017-2018). 
6 | We are not allowed to publish the original papers; however, they are publicly available.
7 | To download them, refer to *data/talksumm_papers_urls.txt*, which contains the papers' titles and URLs,
8 | and to the script *data/get_pdfs.py*, contributed by Tomas Goldsack, which can be used to download the pdf files of the papers.
9 | The summaries can be found at *data/talksumm_summaries.zip*. The name of each summary file is the title of the corresponding paper.
10 | 
11 | Using our code, you can generate summaries given papers and transcripts of their conference talks.
12 | Below are instructions for running our code.
13 | 
14 | If you use this repository, please cite our paper:
15 | ```
16 | @inproceedings{lev-etal-2019-talksumm,
17 | title = "{T}alk{S}umm: A Dataset and Scalable Annotation Method for Scientific Paper Summarization Based on Conference Talks",
18 | author = "Lev, Guy and
19 | Shmueli-Scheuer, Michal and
20 | Herzig, Jonathan and
21 | Jerbi, Achiya and
22 | Konopnicki, David",
23 | booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
24 | month = jul,
25 | year = "2019",
26 | address = "Florence, Italy",
27 | publisher = "Association for Computational Linguistics",
28 | url = "https://www.aclweb.org/anthology/P19-1204",
29 | pages = "2125--2131",
30 | abstract = "Currently, no large-scale training data is available for the task of scientific paper summarization. In this paper, we propose a novel method that automatically generates summaries for scientific papers, by utilizing videos of talks at scientific conferences. We hypothesize that such talks constitute a coherent and concise description of the papers{'} content, and can form the basis for good summaries. We collected 1716 papers and their corresponding videos, and created a dataset of paper summaries. A model trained on this dataset achieves similar performance as models trained on a dataset of summaries created manually. In addition, we validated the quality of our summaries by human experts.",
31 | }
32 | ```
33 | 
34 | # Running the TalkSumm Model
35 | 
36 | ## Python Environment
37 | We used Python 3.6.8. The required packages are listed in requirements.txt.
38 | 
39 | ## Word Embedding
40 | Our model relies on word embeddings. We used pre-trained GloVe embeddings
41 | trained on Wikipedia 2014 + Gigaword, available at https://nlp.stanford.edu/projects/glove.
42 | You can download them by calling:
43 | 
44 | ```
45 | wget http://nlp.stanford.edu/data/glove.6B.zip
46 | ```
47 | 
48 | This zip file contains several versions; we used the 300-dimensional, uncased one
49 | (glove.6B.300d.txt).
50 | 
51 | ## Preparing Input for the TalkSumm Model
52 | For summarizing a paper, the input to the TalkSumm model is as
53 | follows: (1) A text file containing the paper's sentences, one sentence
54 | per line; (2) Optional text files containing information about
55 | the paper's sections; (3) A text file containing the transcript of the
56 | paper's conference talk.
57 | 
58 | ### Preparing Paper Text Files
59 | Given papers in pdf format, we used [science-parse](https://github.com/allenai/science-parse) to convert them to structured json files (see the example invocation below).
60 | After you prepare the papers' json files in a folder, use *prepare_data_for_hmm.py* to process the json files and create the needed text files representing the papers.
61 | In the folder *example/json* we provide a single json file of a paper.
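If you need to create such json files for additional papers, science-parse provides a command-line jar that takes a pdf file and outputs the parsed json. The exact jar name and options depend on the science-parse version you download or build, so treat the following invocation as a rough sketch and check the science-parse README for the precise usage:

```
java -Xmx6g -jar science-parse-cli-assembly-<version>.jar paper.pdf > paper.json
```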
62 | Processing it can be done by calling:
63 | 
64 | ```
65 | python prepare_data_for_hmm.py --json_folder=example/json --out_folder=example --glove_path=/path/to/glove/embedding
66 | ```
67 | 
68 | *prepare_data_for_hmm.py* creates three folders:
69 | 
70 | *text*: Contains the text files of the papers (a sentence per line).
71 | 
72 | *sections_info*: Each file in this folder contains the sentence index range (first index and number of sentences) of the Introduction, Related Work, and Acknowledgments sections (in case they are identified) of the corresponding paper.
73 | 
74 | *section_per_sent*: Each file in this folder stores, at line *i*, the title of the section to which sentence *i* belongs.
75 | 
76 | ### Preparing Transcripts
77 | Additional input to the TalkSumm model is the transcript of the paper's conference talk.
78 | Please refer to our paper, where we describe how we prepared the transcript files.
79 | In the folder *example/transcript* we provide a transcript file for the example paper (this transcript file contains multiple lines, but a file containing all the text in a single line is fine as well).
80 | 
81 | ## Generating Summaries
82 | After preparing the input for the TalkSumm model, you can run it to obtain importance scores for the paper's sentences. A summary can then be created by taking a subset of the top-ranked sentences, up to a desired summary length.
83 | The relevant script is *summarize.py*. It goes over the papers in *data_folder* and, for each paper, creates a corresponding HMM, obtains the sentence scores, and creates a summary of *num_sents* sentences. Sentences with a score lower than *thresh* will not be added to the summary, so the resulting summary might contain fewer than *num_sents* sentences.
84 | Running this script on our example paper can be done as follows:
85 | 
86 | ```
87 | python summarize.py --data_folder=example --out_folder=example/output --word_embed_path=/path/to/glove/embedding --num_sents=30 --thresh=1
88 | ```
89 | 
90 | If you run this script on a large number of papers, you can reduce execution time with multiprocessing: use the *num_processors* argument to set the desired number of processors (by default it is set to 1).
91 | 
92 | Assuming that you use the glove.6B.300d word embedding, the "experiment name" will be *embed_glove.6B.300d*, and a folder of that name will be created for the experiment, under *example/output*.
93 | Under this folder, the following folders will be created, containing the following files:
94 | 
95 | *similarity*: A file storing the similarities between each transcript word and each paper sentence. This enables faster re-running of the HMM after changing its parameters in a way that doesn't affect the word-sentence similarities.
96 | 
97 | *output/durations*: A file containing the "duration" of each paper sentence, i.e. the number of time steps in which the sentence was chosen by the Viterbi algorithm. This models the number of words uttered by the speaker to describe the sentence, and can be used as an importance score.
98 | 
99 | *output/top_scored_sents.num_sents_30_thresh_1*: A file containing the summary of the paper. It consists of the *num_sents* top-ranked sentences with a duration of at least *thresh*. The format of this file is as follows:
100 | - Each line contains the sentence index (in the original paper), the sentence score (i.e. duration), and then the sentence itself. The fields are tab-separated.
101 | - The sentences appear in the same order as in the paper.
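For illustration, a single line of this file looks roughly as follows (the index, score, and sentence are made-up values, and the tab separators are shown here as spaces):

```
17    42    In this paper, we propose a novel method that automatically generates summaries for scientific papers.
```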
102 | 103 | *output/alignment*: A file containing a table showing the alignment between the transcript words and the paper sentences, as obtained by the HMM. The format of this table is the same as Table 4 in the appendix of our paper. 104 | 105 | *output/log*: A log file of the run. 106 | -------------------------------------------------------------------------------- /SummaryCreator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from nltk.tokenize import word_tokenize 3 | 4 | from ArticleDataSample import ArticleDataSample, CommonSectionNames 5 | 6 | 7 | class SummaryCreator: 8 | """ 9 | Given a data sample of a paper, and the durations (scores) of the paper's sentences, obtained using HmmArticle, 10 | this class enables you to generate summaries, according to different objectives: 11 | - Given a number k, the summary will include to top-k sentences, w.r.t their durations (scores). Relevant 12 | function: create_top_scored_sents_file 13 | - Given desired number of words, the summary will include roughly this number of words (roughly - because 14 | the summary will include whole-sentences only). Relevant function: create_summary_file_by_target 15 | - Given a desired compression ratio, the summary length will be determined accordingly. Here, length means number 16 | of sentences. Relevant function: create_summary_file_by_target 17 | A summary file created by create_top_scored_sents_file contains only the sentences chosen for summary. 18 | A summary file created by create_summary_file_by_target contains all the paper's sentences, where the ones 19 | chosen for summary are marked by the string "@highlight". This is the format of the "CNN / Daily Mail" 20 | summarization benchmark. 21 | """ 22 | def __init__(self, article_data_sample: ArticleDataSample, durations=None, durations_fname=None): 23 | self.article_data_sample = article_data_sample 24 | # get the original article sentences (including related work, with original case) 25 | self.orig_sentences = self.article_data_sample.get_article_sentences_unlabeled(lower_case=False) 26 | 27 | if durations is not None: 28 | self.durations = np.copy(durations) 29 | elif durations_fname: 30 | self.load_durations_file(durations_fname) 31 | else: 32 | self.durations = None 33 | 34 | self.sent_len = self.calc_sent_len() 35 | 36 | def calc_sent_len(self): 37 | sent_len = np.zeros(len(self.orig_sentences), dtype=np.int) 38 | for sent_i, sent in enumerate(self.orig_sentences): 39 | sent_len[sent_i] = len(word_tokenize(sent)) 40 | return sent_len 41 | 42 | def load_durations_file(self, durations_fname): 43 | with open(durations_fname) as in_file: 44 | self.durations = [int(line) for line in in_file] 45 | 46 | def get_summary_sent_indices(self, duration_thresh=1): 47 | if self.durations is None: 48 | raise Exception("self.durations not initialized") 49 | 50 | summary_sent_indices = [] 51 | for sent_i, duration in enumerate(self.durations): 52 | if duration >= duration_thresh: 53 | summary_sent_indices.append(sent_i) 54 | 55 | return summary_sent_indices 56 | 57 | def create_summary_file_by_duration(self, out_fname, duration_thresh=1, exclude_related_work=False): 58 | """ 59 | creates a file in the "CNN / Daily Mail" format: the files contain all paper sentences, where 60 | the summary sentences are highlighted. 61 | exclude_related_work: if True, sentences from Related Work section will not be written to the output file. 
62 | """ 63 | summary_sent_indices = self.get_summary_sent_indices(duration_thresh) 64 | if exclude_related_work: 65 | related_work_sent_indices = self.article_data_sample.get_section_sent_indices(CommonSectionNames.RELATED) 66 | ack_sent_indices = self.article_data_sample.get_section_sent_indices(CommonSectionNames.ACK) 67 | excluded_sent_indices = set(related_work_sent_indices + ack_sent_indices) 68 | 69 | else: 70 | excluded_sent_indices = {} 71 | 72 | highlight_str = "@highlight\n" 73 | 74 | with open(out_fname, 'w') as out_file: 75 | for sent_i, sent_str in enumerate(self.orig_sentences): 76 | 77 | if exclude_related_work and sent_i in excluded_sent_indices: 78 | continue 79 | 80 | sent_str += '\n' 81 | out_file.write(sent_str) 82 | 83 | if sent_i in summary_sent_indices: 84 | out_file.write(highlight_str) 85 | out_file.write(sent_str) 86 | 87 | def create_summary_file_by_target(self, out_fname, target_name, target_value, exclude_related_work=False): 88 | """ 89 | target_name: 'compress_ratio' or 'num_words' 90 | """ 91 | n_thresholds = max(self.durations) + 1 92 | 93 | sent_mask_per_thresh = [] 94 | for duration_thresh in range(n_thresholds): 95 | mask = np.asarray([duration >= duration_thresh for duration in self.durations], dtype=np.int) 96 | sent_mask_per_thresh.append(mask) 97 | 98 | if target_name == 'compress_ratio': 99 | num_summary_sents = np.zeros(n_thresholds, dtype=np.int) 100 | for duration_thresh in range(n_thresholds): 101 | num_summary_sents[duration_thresh] = sum(sent_mask_per_thresh[duration_thresh]) 102 | orig_num_of_article_sentences = len(self.orig_sentences) 103 | compress_ratios = num_summary_sents / orig_num_of_article_sentences 104 | result_per_thresh = compress_ratios 105 | 106 | elif target_name == 'num_words': 107 | num_summary_words = np.zeros(n_thresholds, dtype=np.int) 108 | for duration_thresh in range(n_thresholds): 109 | num_summary_words[duration_thresh] = sum(sent_mask_per_thresh[duration_thresh] * self.sent_len) 110 | result_per_thresh = num_summary_words 111 | 112 | else: 113 | raise Exception("unexpected target_name") 114 | 115 | deltas = np.abs(result_per_thresh - target_value) 116 | 117 | chosen_duration_thresh = np.argmin(deltas) 118 | obtained_value = result_per_thresh[chosen_duration_thresh] 119 | 120 | self.create_summary_file_by_duration(out_fname, chosen_duration_thresh, exclude_related_work) 121 | 122 | return chosen_duration_thresh, obtained_value 123 | 124 | def create_scored_sents_in_sections_file(self, out_fname): 125 | section_per_sent = self.article_data_sample.get_section_per_sent() 126 | assert len(section_per_sent) == len(self.orig_sentences) 127 | 128 | sum_durations = sum(self.durations) 129 | 130 | cur_section = None 131 | 132 | with open(out_fname, 'w') as out_file: 133 | for sent_i, section_name in enumerate(section_per_sent): 134 | if section_name != cur_section: 135 | cur_section = section_name 136 | out_file.write("--- {}\n".format(cur_section)) 137 | out_file.write("{}\t{}\t{:.2f}\t{}\n".format(sent_i, 138 | self.durations[sent_i], 139 | self.durations[sent_i] / sum_durations, 140 | self.orig_sentences[sent_i])) 141 | 142 | def create_top_scored_sents_file(self, desired_num_sents, duration_thresh, out_fname): 143 | """ 144 | sentences will be retrieved only if their duration is at least duration_thresh, which means that the number 145 | of retrieved sentences might be smaller than desired_num_sents 146 | """ 147 | # scores = np.array(self.durations) / sum(self.durations) 148 | scores = np.array(self.durations) 149 | 
num_eligible_sents = np.sum(scores >= duration_thresh) 150 | num_retrieved_sents = min(desired_num_sents, num_eligible_sents) 151 | sorted_indices = np.flip(np.argsort(scores)) 152 | top_score_indices = sorted_indices[:num_retrieved_sents] 153 | top_score_indices_orig_order = np.sort(top_score_indices) 154 | 155 | with open(out_fname, 'w') as out_file: 156 | for sent_i in top_score_indices_orig_order: 157 | out_file.write("{}\t{}\t{}\n".format(sent_i, 158 | scores[sent_i], 159 | self.orig_sentences[sent_i])) 160 | 161 | return num_retrieved_sents 162 | -------------------------------------------------------------------------------- /data/LICENSE: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. 
A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 58 | Public License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 63 | ("Public License"). To the extent this Public License may be 64 | interpreted as a contract, You are granted the Licensed Rights in 65 | consideration of Your acceptance of these terms and conditions, and the 66 | Licensor grants You such rights in consideration of benefits the 67 | Licensor receives from making the Licensed Material available under 68 | these terms and conditions. 69 | 70 | 71 | Section 1 -- Definitions. 72 | 73 | a. Adapted Material means material subject to Copyright and Similar 74 | Rights that is derived from or based upon the Licensed Material 75 | and in which the Licensed Material is translated, altered, 76 | arranged, transformed, or otherwise modified in a manner requiring 77 | permission under the Copyright and Similar Rights held by the 78 | Licensor. For purposes of this Public License, where the Licensed 79 | Material is a musical work, performance, or sound recording, 80 | Adapted Material is always produced where the Licensed Material is 81 | synched in timed relation with a moving image. 82 | 83 | b. Adapter's License means the license You apply to Your Copyright 84 | and Similar Rights in Your contributions to Adapted Material in 85 | accordance with the terms and conditions of this Public License. 86 | 87 | c. BY-NC-SA Compatible License means a license listed at 88 | creativecommons.org/compatiblelicenses, approved by Creative 89 | Commons as essentially the equivalent of this Public License. 90 | 91 | d. Copyright and Similar Rights means copyright and/or similar rights 92 | closely related to copyright including, without limitation, 93 | performance, broadcast, sound recording, and Sui Generis Database 94 | Rights, without regard to how the rights are labeled or 95 | categorized. For purposes of this Public License, the rights 96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 97 | Rights. 98 | 99 | e. Effective Technological Measures means those measures that, in the 100 | absence of proper authority, may not be circumvented under laws 101 | fulfilling obligations under Article 11 of the WIPO Copyright 102 | Treaty adopted on December 20, 1996, and/or similar international 103 | agreements. 104 | 105 | f. Exceptions and Limitations means fair use, fair dealing, and/or 106 | any other exception or limitation to Copyright and Similar Rights 107 | that applies to Your use of the Licensed Material. 108 | 109 | g. License Elements means the license attributes listed in the name 110 | of a Creative Commons Public License. The License Elements of this 111 | Public License are Attribution, NonCommercial, and ShareAlike. 112 | 113 | h. Licensed Material means the artistic or literary work, database, 114 | or other material to which the Licensor applied this Public 115 | License. 116 | 117 | i. 
Licensed Rights means the rights granted to You subject to the 118 | terms and conditions of this Public License, which are limited to 119 | all Copyright and Similar Rights that apply to Your use of the 120 | Licensed Material and that the Licensor has authority to license. 121 | 122 | j. Licensor means the individual(s) or entity(ies) granting rights 123 | under this Public License. 124 | 125 | k. NonCommercial means not primarily intended for or directed towards 126 | commercial advantage or monetary compensation. For purposes of 127 | this Public License, the exchange of the Licensed Material for 128 | other material subject to Copyright and Similar Rights by digital 129 | file-sharing or similar means is NonCommercial provided there is 130 | no payment of monetary compensation in connection with the 131 | exchange. 132 | 133 | l. Share means to provide material to the public by any means or 134 | process that requires permission under the Licensed Rights, such 135 | as reproduction, public display, public performance, distribution, 136 | dissemination, communication, or importation, and to make material 137 | available to the public including in ways that members of the 138 | public may access the material from a place and at a time 139 | individually chosen by them. 140 | 141 | m. Sui Generis Database Rights means rights other than copyright 142 | resulting from Directive 96/9/EC of the European Parliament and of 143 | the Council of 11 March 1996 on the legal protection of databases, 144 | as amended and/or succeeded, as well as other essentially 145 | equivalent rights anywhere in the world. 146 | 147 | n. You means the individual or entity exercising the Licensed Rights 148 | under this Public License. Your has a corresponding meaning. 149 | 150 | 151 | Section 2 -- Scope. 152 | 153 | a. License grant. 154 | 155 | 1. Subject to the terms and conditions of this Public License, 156 | the Licensor hereby grants You a worldwide, royalty-free, 157 | non-sublicensable, non-exclusive, irrevocable license to 158 | exercise the Licensed Rights in the Licensed Material to: 159 | 160 | a. reproduce and Share the Licensed Material, in whole or 161 | in part, for NonCommercial purposes only; and 162 | 163 | b. produce, reproduce, and Share Adapted Material for 164 | NonCommercial purposes only. 165 | 166 | 2. Exceptions and Limitations. For the avoidance of doubt, where 167 | Exceptions and Limitations apply to Your use, this Public 168 | License does not apply, and You do not need to comply with 169 | its terms and conditions. 170 | 171 | 3. Term. The term of this Public License is specified in Section 172 | 6(a). 173 | 174 | 4. Media and formats; technical modifications allowed. The 175 | Licensor authorizes You to exercise the Licensed Rights in 176 | all media and formats whether now known or hereafter created, 177 | and to make technical modifications necessary to do so. The 178 | Licensor waives and/or agrees not to assert any right or 179 | authority to forbid You from making technical modifications 180 | necessary to exercise the Licensed Rights, including 181 | technical modifications necessary to circumvent Effective 182 | Technological Measures. For purposes of this Public License, 183 | simply making modifications authorized by this Section 2(a) 184 | (4) never produces Adapted Material. 185 | 186 | 5. Downstream recipients. 187 | 188 | a. Offer from the Licensor -- Licensed Material. 
Every 189 | recipient of the Licensed Material automatically 190 | receives an offer from the Licensor to exercise the 191 | Licensed Rights under the terms and conditions of this 192 | Public License. 193 | 194 | b. Additional offer from the Licensor -- Adapted Material. 195 | Every recipient of Adapted Material from You 196 | automatically receives an offer from the Licensor to 197 | exercise the Licensed Rights in the Adapted Material 198 | under the conditions of the Adapter's License You apply. 199 | 200 | c. No downstream restrictions. You may not offer or impose 201 | any additional or different terms or conditions on, or 202 | apply any Effective Technological Measures to, the 203 | Licensed Material if doing so restricts exercise of the 204 | Licensed Rights by any recipient of the Licensed 205 | Material. 206 | 207 | 6. No endorsement. Nothing in this Public License constitutes or 208 | may be construed as permission to assert or imply that You 209 | are, or that Your use of the Licensed Material is, connected 210 | with, or sponsored, endorsed, or granted official status by, 211 | the Licensor or others designated to receive attribution as 212 | provided in Section 3(a)(1)(A)(i). 213 | 214 | b. Other rights. 215 | 216 | 1. Moral rights, such as the right of integrity, are not 217 | licensed under this Public License, nor are publicity, 218 | privacy, and/or other similar personality rights; however, to 219 | the extent possible, the Licensor waives and/or agrees not to 220 | assert any such rights held by the Licensor to the limited 221 | extent necessary to allow You to exercise the Licensed 222 | Rights, but not otherwise. 223 | 224 | 2. Patent and trademark rights are not licensed under this 225 | Public License. 226 | 227 | 3. To the extent possible, the Licensor waives any right to 228 | collect royalties from You for the exercise of the Licensed 229 | Rights, whether directly or through a collecting society 230 | under any voluntary or waivable statutory or compulsory 231 | licensing scheme. In all other cases the Licensor expressly 232 | reserves any right to collect such royalties, including when 233 | the Licensed Material is used other than for NonCommercial 234 | purposes. 235 | 236 | 237 | Section 3 -- License Conditions. 238 | 239 | Your exercise of the Licensed Rights is expressly made subject to the 240 | following conditions. 241 | 242 | a. Attribution. 243 | 244 | 1. If You Share the Licensed Material (including in modified 245 | form), You must: 246 | 247 | a. retain the following if it is supplied by the Licensor 248 | with the Licensed Material: 249 | 250 | i. identification of the creator(s) of the Licensed 251 | Material and any others designated to receive 252 | attribution, in any reasonable manner requested by 253 | the Licensor (including by pseudonym if 254 | designated); 255 | 256 | ii. a copyright notice; 257 | 258 | iii. a notice that refers to this Public License; 259 | 260 | iv. a notice that refers to the disclaimer of 261 | warranties; 262 | 263 | v. a URI or hyperlink to the Licensed Material to the 264 | extent reasonably practicable; 265 | 266 | b. indicate if You modified the Licensed Material and 267 | retain an indication of any previous modifications; and 268 | 269 | c. indicate the Licensed Material is licensed under this 270 | Public License, and include the text of, or the URI or 271 | hyperlink to, this Public License. 272 | 273 | 2. 
You may satisfy the conditions in Section 3(a)(1) in any 274 | reasonable manner based on the medium, means, and context in 275 | which You Share the Licensed Material. For example, it may be 276 | reasonable to satisfy the conditions by providing a URI or 277 | hyperlink to a resource that includes the required 278 | information. 279 | 3. If requested by the Licensor, You must remove any of the 280 | information required by Section 3(a)(1)(A) to the extent 281 | reasonably practicable. 282 | 283 | b. ShareAlike. 284 | 285 | In addition to the conditions in Section 3(a), if You Share 286 | Adapted Material You produce, the following conditions also apply. 287 | 288 | 1. The Adapter's License You apply must be a Creative Commons 289 | license with the same License Elements, this version or 290 | later, or a BY-NC-SA Compatible License. 291 | 292 | 2. You must include the text of, or the URI or hyperlink to, the 293 | Adapter's License You apply. You may satisfy this condition 294 | in any reasonable manner based on the medium, means, and 295 | context in which You Share Adapted Material. 296 | 297 | 3. You may not offer or impose any additional or different terms 298 | or conditions on, or apply any Effective Technological 299 | Measures to, Adapted Material that restrict exercise of the 300 | rights granted under the Adapter's License You apply. 301 | 302 | 303 | Section 4 -- Sui Generis Database Rights. 304 | 305 | Where the Licensed Rights include Sui Generis Database Rights that 306 | apply to Your use of the Licensed Material: 307 | 308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 309 | to extract, reuse, reproduce, and Share all or a substantial 310 | portion of the contents of the database for NonCommercial purposes 311 | only; 312 | 313 | b. if You include all or a substantial portion of the database 314 | contents in a database in which You have Sui Generis Database 315 | Rights, then the database in which You have Sui Generis Database 316 | Rights (but not its individual contents) is Adapted Material, 317 | including for purposes of Section 3(b); and 318 | 319 | c. You must comply with the conditions in Section 3(a) if You Share 320 | all or a substantial portion of the contents of the database. 321 | 322 | For the avoidance of doubt, this Section 4 supplements and does not 323 | replace Your obligations under this Public License where the Licensed 324 | Rights include other Copyright and Similar Rights. 325 | 326 | 327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 328 | 329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 333 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 339 | 340 | b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 349 | 350 | c. The disclaimer of warranties and limitation of liability provided 351 | above shall be interpreted in a manner that, to the extent 352 | possible, most closely approximates an absolute disclaimer and 353 | waiver of all liability. 354 | 355 | 356 | Section 6 -- Term and Termination. 357 | 358 | a. This Public License applies for the term of the Copyright and 359 | Similar Rights licensed here. However, if You fail to comply with 360 | this Public License, then Your rights under this Public License 361 | terminate automatically. 362 | 363 | b. Where Your right to use the Licensed Material has terminated under 364 | Section 6(a), it reinstates: 365 | 366 | 1. automatically as of the date the violation is cured, provided 367 | it is cured within 30 days of Your discovery of the 368 | violation; or 369 | 370 | 2. upon express reinstatement by the Licensor. 371 | 372 | For the avoidance of doubt, this Section 6(b) does not affect any 373 | right the Licensor may have to seek remedies for Your violations 374 | of this Public License. 375 | 376 | c. For the avoidance of doubt, the Licensor may also offer the 377 | Licensed Material under separate terms or conditions or stop 378 | distributing the Licensed Material at any time; however, doing so 379 | will not terminate this Public License. 380 | 381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 382 | License. 383 | 384 | 385 | Section 7 -- Other Terms and Conditions. 386 | 387 | a. The Licensor shall not be bound by any additional or different 388 | terms or conditions communicated by You unless expressly agreed. 389 | 390 | b. Any arrangements, understandings, or agreements regarding the 391 | Licensed Material not stated herein are separate from and 392 | independent of the terms and conditions of this Public License. 393 | 394 | 395 | Section 8 -- Interpretation. 396 | 397 | a. For the avoidance of doubt, this Public License does not, and 398 | shall not be interpreted to, reduce, limit, restrict, or impose 399 | conditions on any use of the Licensed Material that could lawfully 400 | be made without permission under this Public License. 401 | 402 | b. To the extent possible, if any provision of this Public License is 403 | deemed unenforceable, it shall be automatically reformed to the 404 | minimum extent necessary to make it enforceable. If the provision 405 | cannot be reformed, it shall be severed from this Public License 406 | without affecting the enforceability of the remaining terms and 407 | conditions. 408 | 409 | c. No term or condition of this Public License will be waived and no 410 | failure to comply consented to unless expressly agreed to by the 411 | Licensor. 412 | 413 | d. 
Nothing in this Public License constitutes or may be interpreted 414 | as a limitation upon, or waiver of, any privileges and immunities 415 | that apply to the Licensor or You, including from the legal 416 | processes of any jurisdiction or authority. 417 | 418 | ======================================================================= 419 | 420 | Creative Commons is not a party to its public 421 | licenses. Notwithstanding, Creative Commons may elect to apply one of 422 | its public licenses to material it publishes and in those instances 423 | will be considered the “Licensor.” The text of the Creative Commons 424 | public licenses is dedicated to the public domain under the CC0 Public 425 | Domain Dedication. Except for the limited purpose of indicating that 426 | material is shared under a Creative Commons public license or as 427 | otherwise permitted by the Creative Commons policies published at 428 | creativecommons.org/policies, Creative Commons does not authorize the 429 | use of the trademark "Creative Commons" or any other trademark or logo 430 | of Creative Commons without its prior written consent including, 431 | without limitation, in connection with any unauthorized modifications 432 | to any of its public licenses or any other arrangements, 433 | understandings, or agreements concerning use of licensed material. For 434 | the avoidance of doubt, this paragraph does not form part of the 435 | public licenses. 436 | 437 | Creative Commons may be contacted at creativecommons.org. 438 | 439 | -------------------------------------------------------------------------------- /data/get_pdfs.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Retrieves the pdf files from the URLs provided in 3 | /data/talksumm_papers_urls.txt 4 | Author: Tomas Goldsack 5 | ''' 6 | 7 | import urllib.request 8 | from time import sleep 9 | import numpy as np 10 | import re, logging, os, requests 11 | from datetime import datetime 12 | 13 | ACL_BASE_URL = "https://aclanthology.org/" 14 | ARXIV_BASE_URL = "https://arxiv.org/pdf/" 15 | 16 | if not os.path.exists("./logs"): 17 | os.makedirs("./logs") 18 | 19 | if not os.path.exists("./pdf"): 20 | os.makedirs("./pdf") 21 | downloaded_titles = [] 22 | else: 23 | downloaded_titles = [fpath.split("/")[-1].split(".")[0] for fpath in os.listdir("./pdf")] 24 | 25 | logging.basicConfig( 26 | filename="./logs/get_pdfs.log.{}".format(datetime.timestamp(datetime.now())), 27 | level=logging.INFO, 28 | format = '%(asctime)s | %(levelname)s | %(message)s' 29 | ) 30 | 31 | def download_file(download_url, filename): 32 | '''Downloads and saves the pdf file''' 33 | response = urllib.request.urlopen(download_url) 34 | file = open(filename + ".pdf", 'wb') 35 | file.write(response.read()) 36 | file.close() 37 | 38 | 39 | def get_pdf_links(response): 40 | '''Retrieves PDF URLs from response.text''' 41 | regex = re.compile('(http)(?!.*(http))(.*?)(\.pdf)') 42 | matches = list(set(["".join(link) for link in regex.findall(response.text)])) 43 | return matches 44 | 45 | 46 | def multiple_links_handler(pdf_urls): 47 | '''Gets PDF URLs for specific sites where the generic method finds more than 1 URL''' 48 | pdf_urls = [link for link in pdf_urls if "-supp" not in link] 49 | 50 | if len(pdf_urls) > 1: 51 | springer_urls = [link for link in pdf_urls if "link.springer.com" in link] 52 | pdf_urls = springer_urls if len(springer_urls) > 0 else pdf_urls 53 | 54 | return pdf_urls 55 | 56 | 57 | def no_links_handler(response, url): 58 | '''Gets 
PDF URLs for specific sites where the generic method finds no URLs''' 59 | 60 | if "aclweb" in url or "aclweb" in response.url: 61 | # Retrieve ACL code from original URL (uppercase) 62 | idx = -2 if url.endswith("/") else -1 63 | acl_code = url.split("/")[idx].upper() 64 | # PDF URL format for ACL papers 65 | return [ACL_BASE_URL + acl_code + ".pdf"] 66 | 67 | if "arxiv" in url or "arxiv" in response.url: 68 | idx = -2 if url.endswith("/") else -1 69 | arxiv_code = url.split("/")[idx] 70 | # PDF URL format for ARXIV papers 71 | return [ARXIV_BASE_URL + arxiv_code + ".pdf"] 72 | 73 | if "openreview" in url or "openreview" in response.url: 74 | openrv_url = url if "openreview" in url else response.url 75 | return [openrv_url.replace("forum", "pdf")] 76 | 77 | if "transacl" in url or "transacl" in response.url: 78 | tacl_url = url if "transacl" in url else response.url 79 | tacl_regex = re.compile('(http)(?!.*(http))(.*?)(\/tacl\/article\/view\/[0-9]+\/[0-9]+)') 80 | view_urls = list(set(["".join(link) for link in tacl_regex.findall(response.text)])) 81 | view_urls = [link for link in view_urls if not link.endswith("/0")] 82 | 83 | if len(view_urls) > 1: 84 | # Has more than 1 full text link, take the one with the lowest suffix id number 85 | # (this is consistently the desired TACL site URL) 86 | suffixes = [int(link.split("/")[-1]) for link in view_urls] 87 | min_ind = suffixes.index(min(suffixes)) 88 | view_urls = [view_urls[min_ind]] 89 | 90 | if (len(view_urls) == 1): 91 | return [view_urls[0].replace("view", "download")] 92 | else: 93 | return view_urls 94 | 95 | if ("iaaa" in url or "iaaa" in response.url) or ( 96 | "aaai" in url or "aaai" in response.url 97 | ): 98 | iaaa_url = url if ("iaaa" in url or "aaai" in url) else response.url 99 | iaaa_regex = re.compile('(http)(?!.*(http))(.*?)(\/paper\/view\/[0-9]+\/[0-9]+)') 100 | view_urls = list(set(["".join(link) for link in iaaa_regex.findall(response.text)])) 101 | return view_urls 102 | 103 | if "mdpi" in url or "mdpi" in response.url: 104 | mdpi_url = url if "mdpi" in url else response.url 105 | mdpi_regex = re.compile('(.*?)(\/[0-9]+\/[0-9]+\/[0-9]+\/pdf)') 106 | view_urls = list(set(["".join(link) for link in mdpi_regex.findall(response.text)])) 107 | return view_urls 108 | 109 | if "ceur-ws" in url or "ceur-ws" in response.url: 110 | ceur_url = url if "ceur-ws" in url else response.url 111 | idx = -2 if url.endswith("/") else -1 112 | return [ceur_url + ceur_url.split("/")[idx] + ".pdf"] 113 | 114 | if "isca-speech" in url or "isca-speech" in response.url: 115 | isca_url = url if "isca-speech" in url else response.url 116 | isca_url = isca_url.replace("abstracts", "pdfs") 117 | return [isca_url.replace(".html", ".PDF")] 118 | 119 | return [] 120 | 121 | failed_titles = [] 122 | with open("./talksumm_papers_urls.txt", "r") as input_txt: 123 | for line in input_txt.readlines(): 124 | try: 125 | title, url = line.rstrip().split("\t") 126 | logging.info(f'Processing "{title}"') 127 | 128 | if title in downloaded_titles: 129 | logging.info(f'Already downloaded "{title}"') 130 | continue 131 | 132 | # Sleep to prevent connection reset 133 | sleep(np.random.randint(1, 10)) 134 | 135 | # Make request to given URL 136 | response = requests.get(url, allow_redirects=True) 137 | 138 | # Retrieve URLs to PDFs from response 139 | pdf_links = get_pdf_links(response) 140 | 141 | # Handle too many/too few links 142 | if len(pdf_links) > 1: 143 | logging.warning(f'Too many PDF URLs found: {pdf_links}') 144 | pdf_links = 
multiple_links_handler(pdf_links) 145 | 146 | if len(pdf_links) < 1: 147 | logging.warning(f'No PDF links found on "{url}"') 148 | pdf_links = no_links_handler(response, url) 149 | 150 | if len(pdf_links) == 1: 151 | logging.info(f'Got a single PDF URL: "{pdf_links[0]}"') 152 | download_url = pdf_links[0] 153 | elif url.endswith(".pdf"): # three provided URLs are PDF links 154 | download_url = url 155 | else: 156 | failed_titles.append((title, url)) 157 | raise Exception(f'Got {len(pdf_links)} PDF URLs ({pdf_links})') 158 | 159 | # Download PDF 160 | download_file(download_url, "./pdf/" + title) 161 | logging.info(f'Successfully retrieved PDF') 162 | 163 | except Exception as e: 164 | logging.error(f'Failed to retrieve PDF: {e}') 165 | 166 | logging.info('Finished processing paper titles') 167 | logging.warning(f'Failed to retrieve {len(failed_titles)} PDFs') 168 | logging.warning(f'Failed titles and urls: {failed_titles}') -------------------------------------------------------------------------------- /data/talksumm_summaries.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levguy/talksumm/e7b5f54248407c08e52164d32663ba13ba2971ea/data/talksumm_summaries.zip -------------------------------------------------------------------------------- /example/json/Detecting Egregious Conversations between Customers and Virtual Agents.json: -------------------------------------------------------------------------------- 1 | { 2 | "source" : "META", 3 | "title" : "Detecting Egregious Conversations between Customers and Virtual Agents", 4 | "authors" : [ "Tommy Sandbank", "Michal Shmueli-Scheuer", "Jonathan Herzig", "David Konopnicki", "John Richards", "David Piorkowski" ], 5 | "emails" : [ "tommy@il.ibm.com", "shmueli@il.ibm.com", "hjon@il.ibm.com", "davidko@il.ibm.com", "ajtr@us.ibm.com,", "david.piorkowski@ibm.com" ], 6 | "sections" : [ { 7 | "heading" : null, 8 | "text" : "Proceedings of NAACL-HLT 2018, pages 1802–1811 New Orleans, Louisiana, June 1 - 6, 2018. c©2018 Association for Computational Linguistics" 9 | }, { 10 | "heading" : "1 Introduction", 11 | "text" : "Automated conversational agents (chatbots) are becoming widely used for various tasks such as personal assistants or as customer service agents. Recent studies project that 80% of businesses plan to use chatbots by 20201, and that chatbots will power 85% of customer service interactions by the year 20202. This increasing usage is mainly due to advances in artificial intelligence and natural language processing (Hirschberg and Manning, 2015)\n1http://read.bi/2gU0szG 2http://gtnr.it/2z428RS\nalong with increasingly capable chat development environments, leading to improvements in conversational richness and robustness.\nStill, chatbots may behave extremely badly, leading to conversations so off-the-mark that only a human agent could step in and salvage them. Consequences of these failures may include loss of customer goodwill and associated revenue, and even exposure to litigation if the failures can be shown to include fraudulent claims. Due to the increasing prevalence of chatbots, even a small fraction of such egregious3 conversations could be problematic for the companies deploying chatbots and the providers of chatbot services.\nIn this paper we study detecting these egregious conversations that can arise in numerous ways. For example, incomplete or internally inconsistent training data can lead to false classification of user intent. 
Bugs in dialog descriptions can lead to dead ends. Failure to maintain adequate context can cause chatbots to miss anaphoric references. In the extreme case, malicious actors may provide heavily biased (e.g., the Tay chatbot4) or even hacked misbehaviors.\nIn this article, we focus on customer care systems. In such setting, a conversation usually becomes egregious due to a combination of the aforementioned problems. The resulting customer frustration may not surface in easily detectable ways such as the appearance of all caps, shouting to a speech recognizer, or the use of profanity or extreme punctuation. Consequently, the chatbot will continue as if the conversation is proceeding well, usually\n3Defined by the dictionary as outstandingly bad. 4http://bit.ly/2fwYaa5\n1802\nleading to conversational breakdown. Consider, for example, the anonymized but representative conversation depicted in Figure 1. Here the customer aims to understand the details of a flight ticket. In the first two turns, the chatbot misses the customer’s intentions, which leads to the customer asking “Are you a real person?”. The customer then tries to explain what went wrong, but the chatbot has insufficient exposure to this sort of utterance to provide anything but the default response (“I’m not trained on that”). The response seems to upset the customer and leads to a request for a human agent, which is rejected by the system (“We don’t currently have live agents”). Such rejection along with the previous responses could lead to customer frustration (Amsel, 1992).\nBeing able to automatically detect such conversations, either in real time or through log analysis, could help to improve chatbot quality. If detected in real time, a human agent can be pulled in to salvage the conversation. As an aid to chatbot improvement, analysis of egregious conversations can often point to problems in training data or system logic that can be repaired. While it is possible to scan system logs by eye, the sheer volume of conversations may overwhelm the analyst or lead to random sampling that misses important failures. If, though, we can automatically detect the worst conversations (in our experience, typically under 10% of the total),\nthe focus can be on fixing the worst problems. Our goal in this paper is to study conversational features that lead to egregious conversations. Specifically, we consider customer inputs throughout a whole conversation, and detect cues such as rephrasing, the presence of heightened emotions, and queries about whether the chatbot is a human or requests to speak to an actual human. In addition, we analyze the chatbot responses, looking for repetitions (e.g. from loops that might be due to flow problems), and the presence of ”not trained” responses. Finally, we analyze the larger conversational context exploring, for example, where the presence of a ”not trained” response might be especially problematic (e.g., in the presence of strong customer emotion).\nThe main contributions of this paper are twofold: (1) This is the first research focusing on detecting egregious conversations in conversational agent (chatbot) setting and (2) this is the first research using unique agent, customer, and customer-agent interaction features to detect egregiousness.\nThe rest of this paper is organized as follows. We review related work, then we formally define the methodology for detecting egregious conversations. We describe our data, experimental setting, and results. We then conclude and suggest future directions." 
12 | }, { 13 | "heading" : "2 Related Work", 14 | "text" : "Detecting egregious conversations is a new task, however, there is related work that aim at measuring the general quality of the interactions in conversational systems. These works studied the complementary problem of detecting and measuring user satisfaction and engagement. Early work by (Walker et al., 1997, 2001) discussed a framework that maximizes the user satisfaction by considering measures such as number of inappropriate utterances, recognition rates, number of times user requests repetitions, number of turns per interaction, etc. Shortcomings of this approach are discussed by (Hajdinjak and Mihelic, 2006). Other works focus on predicting the user engagement in such systems. Examples include (Kiseleva et al., 2016b,a; Jiang et al., 2015). Specifically, these\nworks evaluated chat functionality by asking users to make conversations with an intelligent agent and measured the user satisfaction along with other features such as the automatic speech recognition (ASR) quality and intent classification quality. In (Sandbank et al., 2017) the authors presented a conversational system enhanced with emotion analysis, and suggested using emotions as triggers for human escalation. In our work, we likewise use emotion analysis as predictive features for egregious conversation. The works of (Sarikaya, 2017; Sano et al., 2017) studied reasons why users reformulated utterances in such systems. Specifically, in (Sarikaya, 2017) they reported on how the different reasons affect the users’ satisfaction. In (Sano et al., 2017) they focused on how to automatically predict the reason for user’s dissatisfaction using different features. Our work also explores user reformulation (or rephrasing) as one of the features to predict egregious conversations. We build on the previous work by leveraging some of the approaches in our classifier for egregious conversations. In (Walker et al., 2000; Hastie et al., 2002) the authors also looked for problems in a specific setting of spoken conversations. The main difference with our work is that we focus on chat logs for domains for which the expected user utterances are a bit more diverse, using interaction features as well as features that are not sensitive to any architectural aspects of the conversational system (e.g., ASR component). Several other approaches for evaluating chatbot conversations indirectly capture the notion of conversational quality. For example, several prior works borrowed from the field of pragmatics in various metrics around the principles of cooperative conversation (Chakrabarti and Luger, 2013; Saygin A. P., 2002). In (Steidl et al., 2004) they measured dialogue success at the turn level as a way of predicting the success of a conversation as a whole. (Webb et al., 2010) created a measure of dialogue appropriateness to determine its role in maintaining a conversation. Recently, (Liu et al., 2016) evaluated a number of popular measures for dialogue response generation systems and highlighted specific weaknesses in the measures. Simi-\nlarly, in (Sebastian et al., 2009) they developed a taxonomy of available measures for an enduser’s quality of experience for multimodel dialogue systems, some of which touch on conversational quality. 
All these measures may serve as reasons for a conversation turning egregious, but none try to capture or predict it directly.\nIn the domain of customer service, researchers mainly studied reasons for failure of such systems along with suggestions for improved design (Mimoun et al., 2012; Gnewuch et al., 2017). In (Mimoun et al., 2012) the authors analyzed reasons sales chatbots fail by interviewing chatbots experts. They found that a combination of exaggerated customer expectations along with a reduction in agent performance (e.g., failure to listen to the consumer, being too intrusive) caused customers to stop using such systems. Based on this qualitative study, they proposed an improved model for sales chatbots. In (Gnewuch et al., 2017) they studied service quality dimensions (i.e., reliability, empathy, responsiveness, and tangibility) and how to apply them during agent design. The main difference between those works and ours is that they focus on qualitative high-level analysis while we focus on automatic detection based on the conversations logs." 15 | }, { 16 | "heading" : "3 Methodology", 17 | "text" : "The objective of this work is to reliably detect egregious conversations between a human and a virtual agent. We treat this as a binary classification task, where the target classes are “egregious” and “non-egregious”. While we are currently applying this to complete conversations (i.e., the classification is done on the whole conversation), some of the features examined here could likely be used to detect egregious conversations as they were unfolding in real time. To perform egregious conversation detection, features from both customer inputs and agent responses are extracted, together with features related to the combination of specific inputs and responses. In addition, some of these features are contextual, meaning that they are dependent on where in the conversation they appear.\nUsing this set of features for detecting egre-\ngious conversations is novel, and as our experimental results show, improves performance compared to a model based solely on features extracted from the conversation’s text. We now describe the agent, customer, and combined customer-agent features." 18 | }, { 19 | "heading" : "3.1 Agent Response Features", 20 | "text" : "A virtual agent is generally expected to closely simulate interactions with a human operator (Reeves and Nass, 1996; Nass and Moon,Y, 2000; Krämer, 2008). When the agent starts losing the context of a conversation, fails in understanding the customer intention, or keeps repeating the same responses, the illusion of conversing with a human is lost and the conversation may become extremely annoying. With this in mind, we now describe the analysis of the agent’s responses and associated features (summarized in the top part of Table 1)." 21 | }, { 22 | "heading" : "3.1.1 Repeating Response Analysis", 23 | "text" : "As typically implemented, the virtual agent’s task is to reliably detect the intent of each customer’s utterance and respond meaningfully. Accurate intent detection is thus a fundamental characteristic of well-trained virtual agents, and incorrect intent analysis is reported as the leading cause of user dissatisfaction (Sarikaya, 2017). Moreover, since a classifier (e.g., SVM, neural network, etc.) 
is often used to detect intents, its probabilistic behavior can cause the agent to repeat the same (or semantically similar) response over and over again, despite the user’s attempt to rephrase the same intent.\nSuch agent repetitions lead to an unnatural interaction (Klüwer, 2011). To identify the agent’s repeating responses, we measured similarity between agent’s subsequent (not necessarily sequential) turns. We represented each sentence by averaging the pre-trained embeddings5 of each word in the sentence, calculating the cosine similarity between the representations. Turns with a high similarity value6 are considered as repeating responses.\n5https://code.google.com/archive/p/word2vec 6Empirically, similarity values ≥ 0.8" 24 | }, { 25 | "heading" : "3.1.2 Unsupported Intent Analysis", 26 | "text" : "Given that the knowledge of a virtual agent is necessarily limited, we can expect that training would not cover all customer intents. If the classifier technology provides an estimate of classification confidence, the agent can respond with some variant of “I’m not trained on that” when confidence is low. In some cases, customers will accept that not all requests are supported. In other cases, unsupported intents can lead to customer dissatisfaction (Sarikaya, 2017), and cascade to an egregious conversation (as discussed below in Section 3.3). We extracted the possible variants of the unsupported intent messages directly from the system, and later matched them with the agent responses from the logs." 27 | }, { 28 | "heading" : "3.2 Customer Inputs Features", 29 | "text" : "From the customer’s point of view, an ineffective interaction with a virtual agent is clearly undesirable. An ineffective interaction requires the expenditure of relatively large effort from the customer with little return on the investment (Zeithaml et al., 1990; Mimoun et al., 2012). These efforts can appear as behavioral cues in the customer’s inputs, and include emotions, repetitions, and more. We used the following customer analysis in our model. Customer features are summarized in the middle part of Table 1." 30 | }, { 31 | "heading" : "3.2.1 Rephrasing Analysis", 32 | "text" : "When a customer repeats or rephrases an utterance, it usually indicates a problem with the agent’s understanding of the customer’s intent. This can be caused by different reasons as described in (Sano et al., 2017). To measure the similarity between subsequent customer turns to detect repetition or rephrasing, we used the same approach as described in Section 3.1.1. Turns with a high similarity value6 are considered as rephrases." 33 | }, { 34 | "heading" : "3.2.2 Emotional Analysis", 35 | "text" : "The customer’s emotional state during the conversation is known to correlate with the conversation’s quality (Oliver, 2014). In order to analyze the emotions that customers exhibit in each turn, we utilized the IBM Tone Analyzer service, available publicly online7.\n7https://ibm.co/2hnYkCv\nThis service was trained using customer care interactions, and infers emotions such as frustration, sadness, happiness. We focused on negative emotions (denoted as NEG EMO) to identify turns with a negative emotional peak (i.e., single utterances that carried high negative emotional state), as well as to estimate the aggregated negative emotion throughout the conversation (i.e., the averaged negative emotion intensity). 
In order to get a more robust representation of the customer’s negative emotional state, we summed the score of the negative emotions (such as frustration, sadness, anger, etc.) into a single negative sentiment score (denoted as NEG SENT). Note that we used the positive emotions as a filter for other customer features, such as the rephrasing analysis. Usually, high positive emotions capture different styles of “thanking the agent”, or indicate that the customer is somewhat satisfied (Rychalski and Hudson, 2017), thus, the conversation is less likely to become egregious." 36 | }, { 37 | "heading" : "3.2.3 Asking for a Human Agent", 38 | "text" : "In examining the conversation logs, we noticed that it is not unusual to find a customer asking to be transferred to a human agent. Such a request might indicate that the virtual agent is not providing a satisfactory service. Moreover, even if there are human agents, they might not be available at all times, and thus, a rejection of such a request is sometimes reasonable, but might still lead to customer frustration (Amsel, 1992)." 39 | }, { 40 | "heading" : "3.2.4 Unigram Input", 41 | "text" : "In addition to the above analyses, we also detected customer turns that contain exactly one word. The assumption is that single word (unigram) sentences are probably short customer responses (e.g., no, yes, thanks, okay), which in most cases do not contribute to the egregiousness of the conversation. Hence, calculating the percentage of those turns out of the whole conversation gives us another measurable feature." 42 | }, { 43 | "heading" : "3.3 Customer-Agent Interaction Features", 44 | "text" : "We also looked at features across conversation utterance-response pairs in order to capture a more complete picture of the interac-\ntion between the customer and the virtual agent. Here, we considered a pair to be customer utterance followed by an agent response. For example, a pair may contain a turn in which the customer expressed negative emotions and received a response of “not trained” by the agent. In this case, we would leverage the two analyses: emotional and unsupported intent. Figure 1 gives an example of this in the customer’s penultimate turn. Such interactions may divert the conversation towards becoming egregious. These features are summarized in the last part of Table 1." 45 | }, { 46 | "heading" : "3.3.1 Similarity Analysis", 47 | "text" : "We also calculated the similarity between the customer’s turn and the virtual agent’s response in cases of customer rephrasing. This analysis aims to capture the reason for the customer rephrasing. When a similarity score between the customer’s turn and the agent’s response is low, this may indicate a misclassified intent, as the agent’s responses are likely to share some textual similarity to the customer’s utterance. Thus, a low score may indicate a poor interaction, which might lead the conversation to become egregious. Another similarity feature is between two customer’s subsequent turns when the agent’s response was “not trained”." 48 | }, { 49 | "heading" : "3.4 Conversation Egregiousness Prediction Classifier", 50 | "text" : "We trained a binary SVM classifier with a linear kernel. A feature vector for a sample in the training data is generated using the scores calculated for the described features, where each feature value is a number between [0,1]. 
After the model was trained, test conversations are classified by the model, after being transformed to a feature vector in the same way a training sample is transformed. The SVM classification model (denoted EGR) outputs a label “egregious” or “non-egregious” as a prediction for the conversation." 51 | }, { 52 | "heading" : "4 Experiments", 53 | "text" : "" 54 | }, { 55 | "heading" : "4.1 Dataset", 56 | "text" : "We extracted data from two commercial systems that provide customer support via conversational bots (hereafter denoted as company A and company B). Both agents are using similar underlying conversation engines, each embedded in a larger system with its own unique business logic. Company A’s system deals with sales support during an online purchase, while company B’s system deals with technical support for purchased software products. Each system logs conversations, and each conversation is a sequence of tuples, where each tuple consists of {conversation id, turn id, customer input, agent response}. From each system, we randomly extracted 10000 conversations. We further removed conversations that contained fewer than 2 turns, as these are too short to be meaningful since the customer never replied or provided more details about the issue at hand. Figure 2 depicts the frequencies of conversation lengths which follow a power-law relationship. The conversations from company A’s system tend to be longer, with an average of 8.4 turns vs. an average of 4.4 turns for company B." 57 | }, { 58 | "heading" : "4.2 Experimental Setup", 59 | "text" : "The first step in building a classification model is to obtain ground truth data. For this purpose, we randomly sampled conversations from our datasets. This sample included 1100 and 200 conversations for company A and company B respectively. The\nsampled conversations were tagged using an in-house tagging system designed to increase the consistency of human judgements. Each conversation was tagged by four different expert judges8. Given the full conversation, each judge tagged whether the conversation was egregious or not following this guideline: “Conversations which are extraordinarily bad in some way, those conversations where you’d like to see a human jump in and save the conversation”.\nWe generated true binary labels by considering a conversation to be egregious if at least three of the four judges agreed. The interrater reliability between all judges, measured by Cohen’s Kappa, was 0.72 which indicates high level agreement. This process generated the egregious class sizes of 95 (8.6%) and 16 (8%) for company A and company B, respectively. This verifies the unbalanced data expectation as previously discussed.\nWe also implemented two baseline models, rule-based and text-based, as follows:\nRule-based. In this approach, we look for cases in which the virtual agent responded with a “not trained” reply, or occurrences of the customer requesting to talk to a human agent. As discussed earlier, these may be indicative of the customer’s dissatisfaction with the nature of the virtual agent’s responses.\nText-based. A model that was trained to predict egregiousness given the conversation’s text (all customer and agent’s text dur-\n8judges that are HCI experts and have experience in designing conversational agents systems.\ning the conversation). This model was implemented using state-of-the-art textual features as in (Herzig et al., 2017). 
In (Herzig et al., 2017) emotions are detected from text, which can be thought of as similar to our task of predicting egregious conversations.\nWe evaluated these baseline methods against our classifier using 10-fold crossvalidation over company A’s dataset (we did not use company B’s data for training due to the low number of tagged conversations). Since class distribution is unbalanced, we evaluated classification performance by using precision (P), recall (R) and F1-score (F) for each class. The EGR classifier was implemented using an SVM with a linear kernel9." 60 | }, { 61 | "heading" : "4.3 Classification Results", 62 | "text" : "Table 2 depicts the classification results for both classes and the three models we explored. The EGR model significantly outperformed both baselines10. Specifically, for the egregious class, the precision obtained by the text-based and EGR models were similar. This indicates that the text analyzed by both models encodes some information about egregiousness. On the other hand, for the recall and hence the F1-score, the EGR model relatively improved the text-based model by 41% and 18%, respectively. We will further analyze the models below." 63 | }, { 64 | "heading" : "4.4 Feature Set Contribution Analysis", 65 | "text" : "To better understand the contributions of different sets of features to our EGR model, we examined various features in an incremental fashion. Based on the groups of feature sets that we defined in Section 3, we tested the performance of different group combinations, added in the following order: agent, customer and customer-agent interactions.\n9http://scikit-learn.org/stable/modules/svm.html 10EGR with p < 0.001, using McNemar’s test.\nFigure 3 depicts the results for the classification task. The x-axis represents specific combinations of groups, and the y-axis represents the performance obtained. Figure 3 shows that adding each group improved performance, which indicates the informative value of each group. The figure also suggests that the most informative group in terms of prediction ability is the customer group." 66 | }, { 67 | "heading" : "4.5 Cross-Domain Analysis", 68 | "text" : "We also studied how robust our features were: If our features generalize well, performance should not drop much when testing company B with the classifier trained exclusively on the data from company A. Although company A and company B share similar conversation engine platforms, they are completely different in terms of objectives, domain, terminology, etc. For this task, we utilized the 200 annotated conversations of company B as test data, and experimented with the different models, trained on company A’s data. The rule-based baseline does not require training, of course, and could be applied directly.\nTable 3 summarizes the results showing that the performance of the EGR model is relatively stable (w.r.t the model’s performance when it was trained and tested on the same domain), with a degradation of only 9% in F1-score11. In addition, the results also show that the text-based model performs poorly when applied to a different domain (F1-score of 0.11). This may occur since textual features are closely tied to the training domain.\n11EGR model results are statistically significant compared to the baselines models with p < 0.001, using McNemar’s test." 
69 | }, { 70 | "heading" : "4.6 Models Analysis", 71 | "text" : "" 72 | }, { 73 | "heading" : "4.6.1 Customer Rephrasing Analysis", 74 | "text" : "Inspired by (Sarikaya, 2017; Sano et al., 2017) we analyzed the customer rephrasing motivations for both the egregious and the non-egregious classes. First, we detected customer rephrasing as described in Section 3.2.1, and then assigned to each its motivation. Specifically, in our setting, the relevant motivations are12: (1) Natural language understanding (NLU) error - the agent’s intent detection is wrong, and thus the agent’s response is semantically far from the customer’s turn; (2) Language generation (LG) limitation - the intent is detected correctly, but the customer is not satisfied by the response (for example, the response was too generic); (3) Unsupported intent error - the customer’s intent is not supported by the agent.\nIn order to detect NLU errors, we measured the similarity between the first customer turn (before the rephrasing) and the agent response. We followed the methodology presented in (Jovita et al., 2015) claiming that the best answer given by the system has the highest similarity value between the customer turn and the agent answer. Thus, if the similarity was < 0.8 we considered this as an erroneous detection. If the similarity was ≥ 0.8 we considered the detection as correct, and thus the rephrasing occurred due to LG limitation. To detect unsupported intent error we used the approach described in Section 3.1.2. As reported in table 4, rephrasing due to an unsupported intent is more common in egregious conversations (18% vs. 14%), whereas, rephrasing due to generation limitations (LG limitation) is more common in\n12We did not consider other motivations like automatic speech recognition (ASR) errors, fallback to search, and backend failure as they are not relevant to our setting.\nnon-egregious conversations (37% vs. 33%). This indicates that customers are more tolerant of cases where the system understood their intent, but the response is not exactly what they expected, rather than cases where the system’s response was “not trained”. Finally, the percentage of rephrasing due to wrong intent detection (NLU errors) is similar for both classes, which is somewhat expected as similar underlying systems provided NLU support." 75 | }, { 76 | "heading" : "4.6.2 Recall Analysis", 77 | "text" : "We further investigated why the EGR model was better at identifying egregious conversations (i.e., its recall was higher compared to the baseline models). We manually examined 26 egregious conversations that were identified justly so by the EGR model, but misclassified by the other models. Those conversations were particularly prevalent with the agent’s difficulty to identify correctly the user’s intent due to NLU errors or LG limitation. We did not encounter any unsupported intent errors leading to customer rephrasing, which affected the ability of the rule-based model to classify those conversations as egregious. In addition, the customer intents that appeared in those conversations were very diverse. While customer rephrasing was captured by the EGR model, for the text-based model some of the intents were new (did not appear in the training data) and thus were difficult for the model to capture." 
78 | }, { 79 | "heading" : "5 Conclusions and Future Work", 80 | "text" : "In this paper, we have shown how it is possible to detect egregious conversations using a combination of customer utterances, agent responses, and customer-agent interactional features. As explained, the goal of this work is to give developers of automated agents tools to detect and then solve problems cre-\nated by exceptionally bad conversations. In this context, future work includes collecting more data and using neural approaches (e.g., RNN, CNN) for analysis, validating our models on a range of domains beyond the two explored here. We also plan to extend the work to detect egregious conversations in real time (e.g., for escalating to a human operators), and create log analysis tools to analyze the root causes of egregious conversations and suggest possible remedies." 81 | } ], 82 | "references" : [ { 83 | "title" : "Frustration Theory: An Analysis of Dispositional Learning and Memory", 84 | "author" : [ "Abram Amsel." ], 85 | "venue" : "Problems in the Behavioural Sciences. Cambridge University Press. https://doi.org/10.1017/CBO9780511665561.", 86 | "citeRegEx" : "Amsel.,? 1992", 87 | "shortCiteRegEx" : "Amsel.", 88 | "year" : 1992 89 | }, { 90 | "title" : "A framework for simulating and evaluating artificial chatter bot conversations", 91 | "author" : [ "Chayan Chakrabarti", "George F. Luger." ], 92 | "venue" : "FLAIRS Conference.", 93 | "citeRegEx" : "Chakrabarti and Luger.,? 2013", 94 | "shortCiteRegEx" : "Chakrabarti and Luger.", 95 | "year" : 2013 96 | }, { 97 | "title" : "Towards designing cooperative and social conversational agents for customer service", 98 | "author" : [ "Ulrich Gnewuch", "Stefan Morana", "Alexander Maedche." ], 99 | "venue" : "Proceedings of the International Conference on Information Systems (ICIS).", 100 | "citeRegEx" : "Gnewuch et al\\.,? 2017", 101 | "shortCiteRegEx" : "Gnewuch et al\\.", 102 | "year" : 2017 103 | }, { 104 | "title" : "The paradise evaluation framework: Issues and findings", 105 | "author" : [ "Melita Hajdinjak", "France Mihelic." ], 106 | "venue" : "Comput. Linguist. 32(2).", 107 | "citeRegEx" : "Hajdinjak and Mihelic.,? 2006", 108 | "shortCiteRegEx" : "Hajdinjak and Mihelic.", 109 | "year" : 2006 110 | }, { 111 | "title" : "What’s the problem: Automatically identifying problematic dialogues in DARPA communicator dialogue systems", 112 | "author" : [ "Helen Wright Hastie", "Rashmi Prasad", "Marilyn A. Walker." ], 113 | "venue" : "Proceedings of the 40th Annual Meeting of the As-", 114 | "citeRegEx" : "Hastie et al\\.,? 2002", 115 | "shortCiteRegEx" : "Hastie et al\\.", 116 | "year" : 2002 117 | }, { 118 | "title" : "Emotion detection from text via ensemble classification using word embeddings", 119 | "author" : [ "Jonathan Herzig", "Michal Shmueli-Scheuer", "David Konopnicki." ], 120 | "venue" : "Proceedings of the ACM SIGIR International Conference on", 121 | "citeRegEx" : "Herzig et al\\.,? 2017", 122 | "shortCiteRegEx" : "Herzig et al\\.", 123 | "year" : 2017 124 | }, { 125 | "title" : "Advances in natural language processing", 126 | "author" : [ "Julia Hirschberg", "Christopher D. Manning." ], 127 | "venue" : "Science 349(6245):261–266.", 128 | "citeRegEx" : "Hirschberg and Manning.,? 
2015", 129 | "shortCiteRegEx" : "Hirschberg and Manning.", 130 | "year" : 2015 131 | }, { 132 | "title" : "Automatic online evaluation of intelligent assistants", 133 | "author" : [ "Jiepu Jiang", "Ahmed Hassan Awadallah", "Rosie Jones", "Umut Ozertem", "Imed Zitouni", "Ranjitha Gurunath Kulkarni", "Omar Zia Khan." ], 134 | "venue" : "Proceedings of the 24th Inter-", 135 | "citeRegEx" : "Jiang et al\\.,? 2015", 136 | "shortCiteRegEx" : "Jiang et al\\.", 137 | "year" : 2015 138 | }, { 139 | "title" : "Using vector space model in question answering system", 140 | "author" : [ "Jovita", "Linda", "Andrei Hartawan", "Derwin Suhartono." ], 141 | "venue" : "Procedia Computer Science 59:305 – 311. International Conference on Computer Science and Computational Intel-", 142 | "citeRegEx" : "Jovita et al\\.,? 2015", 143 | "shortCiteRegEx" : "Jovita et al\\.", 144 | "year" : 2015 145 | }, { 146 | "title" : "Predicting user satisfaction with intelligent assistants", 147 | "author" : [ "Julia Kiseleva", "Kyle Williams", "Ahmed Hassan Awadallah", "Aidan C. Crook", "Imed Zitouni", "Tasos Anastasakos." ], 148 | "venue" : "Proceedings of the 39th International ACM SIGIR Con-", 149 | "citeRegEx" : "Kiseleva et al\\.,? 2016a", 150 | "shortCiteRegEx" : "Kiseleva et al\\.", 151 | "year" : 2016 152 | }, { 153 | "title" : "Understanding user satisfaction with intelligent assistants", 154 | "author" : [ "Julia Kiseleva", "Kyle Williams", "Jiepu Jiang", "Ahmed Hassan Awadallah", "Aidan C. Crook", "Imed Zitouni", "Tasos Anastasakos." ], 155 | "venue" : "Proceedings of the 2016 ACM on Con-", 156 | "citeRegEx" : "Kiseleva et al\\.,? 2016b", 157 | "shortCiteRegEx" : "Kiseleva et al\\.", 158 | "year" : 2016 159 | }, { 160 | "title" : "I Like Your Shirt” - Dialogue Acts for Enabling Social Talk in Conversational Agents, pages 14–27", 161 | "author" : [ "Tina Klüwer" ], 162 | "venue" : null, 163 | "citeRegEx" : "Klüwer.,? \\Q2011\\E", 164 | "shortCiteRegEx" : "Klüwer.", 165 | "year" : 2011 166 | }, { 167 | "title" : "Social effects of virtual assistants", 168 | "author" : [ "Nicole C. Krämer." ], 169 | "venue" : "a review of empirical results with regard to communication. In Proceedings of the 8th International Conference on Intelligent Virtual Agents. IVA ’08.", 170 | "citeRegEx" : "Krämer.,? 2008", 171 | "shortCiteRegEx" : "Krämer.", 172 | "year" : 2008 173 | }, { 174 | "title" : "How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue", 175 | "author" : [ "Chia-Wei Liu", "Ryan Lowe", "Iulian Serban", "Michael Noseworthy", "Laurent Charlin", "Joelle Pineau" ], 176 | "venue" : null, 177 | "citeRegEx" : "Liu et al\\.,? \\Q2016\\E", 178 | "shortCiteRegEx" : "Liu et al\\.", 179 | "year" : 2016 180 | }, { 181 | "title" : "Case study embodied virtual agents: An analysis on reasons for failure", 182 | "author" : [ "Mohammed Slim Ben Mimoun", "Ingrid Poncin", "Marion Garnier." ], 183 | "venue" : "Journal of Retailing and Consumer Services 19(6):605 – 612.", 184 | "citeRegEx" : "Mimoun et al\\.,? 2012", 185 | "shortCiteRegEx" : "Mimoun et al\\.", 186 | "year" : 2012 187 | }, { 188 | "title" : "Machines and mindlessness: Social responses to computers", 189 | "author" : [ "C. Nass", "Y. Moon" ], 190 | "venue" : "Journal of Social Issues", 191 | "citeRegEx" : "Nass and Moon,? \\Q2000\\E", 192 | "shortCiteRegEx" : "Nass and Moon", 193 | "year" : 2000 194 | }, { 195 | "title" : "Satisfaction: A behavioral perspective on the consumer", 196 | "author" : [ "Richard L Oliver." 
], 197 | "venue" : "Routledge.", 198 | "citeRegEx" : "Oliver.,? 2014", 199 | "shortCiteRegEx" : "Oliver.", 200 | "year" : 2014 201 | }, { 202 | "title" : "The Media Equation: How People Treat Computers, Television, and New Media Like Real People and Places", 203 | "author" : [ "Byron Reeves", "Clifford Nass." ], 204 | "venue" : "Cambridge University Press, New York, NY, USA.", 205 | "citeRegEx" : "Reeves and Nass.,? 1996", 206 | "shortCiteRegEx" : "Reeves and Nass.", 207 | "year" : 1996 208 | }, { 209 | "title" : "Asymmetric effects of customer emotions on satisfaction and loyalty in a utilitarian service context", 210 | "author" : [ "Aude Rychalski", "Sarah Hudson." ], 211 | "venue" : "Journal of Business Research 71:84 – 91.", 212 | "citeRegEx" : "Rychalski and Hudson.,? 2017", 213 | "shortCiteRegEx" : "Rychalski and Hudson.", 214 | "year" : 2017 215 | }, { 216 | "title" : "Ehctool: Managing emotional hotspots for conversational agents", 217 | "author" : [ "Tommy Sandbank", "Michal Shmueli-Scheuer", "Jonathan Herzig", "David Konopnicki", "Rottem Shaul." ], 218 | "venue" : "Proceedings of the 22nd Inter-", 219 | "citeRegEx" : "Sandbank et al\\.,? 2017", 220 | "shortCiteRegEx" : "Sandbank et al\\.", 221 | "year" : 2017 222 | }, { 223 | "title" : "Predicting causes of reformulation in intelligent assistants", 224 | "author" : [ "Shumpei Sano", "Nobuhiro Kaji", "Manabu Sassano." ], 225 | "venue" : "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue. pages 299–309.", 226 | "citeRegEx" : "Sano et al\\.,? 2017", 227 | "shortCiteRegEx" : "Sano et al\\.", 228 | "year" : 2017 229 | }, { 230 | "title" : "The technology behind personal digital assistants: An overview of the system architecture and key components", 231 | "author" : [ "R. Sarikaya." ], 232 | "venue" : "IEEE Signal Processing Magazine 34(1):67–81.", 233 | "citeRegEx" : "Sarikaya.,? 2017", 234 | "shortCiteRegEx" : "Sarikaya.", 235 | "year" : 2017 236 | }, { 237 | "title" : "Pragmatics in humancomputer conversations", 238 | "author" : [ "P. Cicekli I. Saygin A." ], 239 | "venue" : "Journal of Pragmatics 34(3).", 240 | "citeRegEx" : "A.,? 2002", 241 | "shortCiteRegEx" : "A.", 242 | "year" : 2002 243 | }, { 244 | "title" : "A taxonomy of quality of service and quality of experience of multimodal human-machine interaction", 245 | "author" : [ "Moller Sebastian", "Klaus-Peter Engelbrecht", "Christine Kuhnel", "Ina Wechsung", "Benjamin Weiss." ], 246 | "venue" : "Quality of Mul-", 247 | "citeRegEx" : "Sebastian et al\\.,? 2009", 248 | "shortCiteRegEx" : "Sebastian et al\\.", 249 | "year" : 2009 250 | }, { 251 | "title" : "Looking at the Last Two Turns, I’d Say", 252 | "author" : [ "Stefan Steidl", "Christian Hacker", "Christine Ruff", "Anton Batliner", "Elmar Nöth", "Jürgen Haas" ], 253 | "venue" : null, 254 | "citeRegEx" : "Steidl et al\\.,? \\Q2004\\E", 255 | "shortCiteRegEx" : "Steidl et al\\.", 256 | "year" : 2004 257 | }, { 258 | "title" : "Paradise: A framework for evaluating spoken dialogue agents", 259 | "author" : [ "Marilyn A. Walker", "Diane J. Litman", "Candace A. Kamm", "Alicia Abella." ], 260 | "venue" : "Proceedings of the 35th Annual Meeting of the Association for Computational Linguis-", 261 | "citeRegEx" : "Walker et al\\.,? 1997", 262 | "shortCiteRegEx" : "Walker et al\\.", 263 | "year" : 1997 264 | }, { 265 | "title" : "Quantitative and qualitative evaluation of darpa communicator spoken dialogue systems", 266 | "author" : [ "Marilyn A. Walker", "Rebecca Passonneau", "Julie E. Boland." 
], 267 | "venue" : "Proceedings of the 39th Annual Meeting on Association for Computational", 268 | "citeRegEx" : "Walker et al\\.,? 2001", 269 | "shortCiteRegEx" : "Walker et al\\.", 270 | "year" : 2001 271 | }, { 272 | "title" : "Using natural language processing and discourse features to identify understanding errors", 273 | "author" : [ "Marilyn A. Walker", "Jeremy H. Wright", "Irene Langkilde." ], 274 | "venue" : "Proceedings of the Seventeenth International Conference on Machine Learn-", 275 | "citeRegEx" : "Walker et al\\.,? 2000", 276 | "shortCiteRegEx" : "Walker et al\\.", 277 | "year" : 2000 278 | }, { 279 | "title" : "Evaluating human-machine conversation for appropriateness", 280 | "author" : [ "Nick Webb", "David Benyon", "Preben Hansen", "Oil Mival." ], 281 | "venue" : "Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC’10).", 282 | "citeRegEx" : "Webb et al\\.,? 2010", 283 | "shortCiteRegEx" : "Webb et al\\.", 284 | "year" : 2010 285 | }, { 286 | "title" : "Delivering quality service: Balancing customer perceptions and expectations", 287 | "author" : [ "Valarie Zeithaml", "A Parsu Parasuraman", "Leonard Berry." ], 288 | "venue" : "1811", 289 | "citeRegEx" : "Zeithaml et al\\.,? 1990", 290 | "shortCiteRegEx" : "Zeithaml et al\\.", 291 | "year" : 1990 292 | } ], 293 | "referenceMentions" : [ { 294 | "referenceID" : 6, 295 | "context" : "in artificial intelligence and natural language processing (Hirschberg and Manning, 2015)", 296 | "startOffset" : 59, 297 | "endOffset" : 89 298 | }, { 299 | "referenceID" : 0, 300 | "context" : "Such rejection along with the previous responses could lead to customer frustration (Amsel, 1992).", 301 | "startOffset" : 84, 302 | "endOffset" : 97 303 | }, { 304 | "referenceID" : 3, 305 | "context" : "Shortcomings of this approach are discussed by (Hajdinjak and Mihelic, 2006).", 306 | "startOffset" : 47, 307 | "endOffset" : 76 308 | }, { 309 | "referenceID" : 19, 310 | "context" : "In (Sandbank et al., 2017) the authors presented a conversa-", 311 | "startOffset" : 3, 312 | "endOffset" : 26 313 | }, { 314 | "referenceID" : 21, 315 | "context" : "of (Sarikaya, 2017; Sano et al., 2017) studied reasons why users reformulated utterances in such systems.", 316 | "startOffset" : 3, 317 | "endOffset" : 38 318 | }, { 319 | "referenceID" : 20, 320 | "context" : "of (Sarikaya, 2017; Sano et al., 2017) studied reasons why users reformulated utterances in such systems.", 321 | "startOffset" : 3, 322 | "endOffset" : 38 323 | }, { 324 | "referenceID" : 21, 325 | "context" : "Specifically, in (Sarikaya, 2017) they reported on how the different reasons affect the users’ satisfaction.", 326 | "startOffset" : 17, 327 | "endOffset" : 33 328 | }, { 329 | "referenceID" : 20, 330 | "context" : "In (Sano et al., 2017) they focused on how to automatically predict the reason for user’s dissatisfaction using different features.", 331 | "startOffset" : 3, 332 | "endOffset" : 22 333 | }, { 334 | "referenceID" : 27, 335 | "context" : "In (Walker et al., 2000; Hastie et al., 2002) the authors also looked for problems in a specific setting of spoken conversations.", 336 | "startOffset" : 3, 337 | "endOffset" : 45 338 | }, { 339 | "referenceID" : 4, 340 | "context" : "In (Walker et al., 2000; Hastie et al., 2002) the authors also looked for problems in a specific setting of spoken conversations.", 341 | "startOffset" : 3, 342 | "endOffset" : 45 343 | }, { 344 | "referenceID" : 1, 345 | "context" : "the field of pragmatics in various 
metrics around the principles of cooperative conversation (Chakrabarti and Luger, 2013; Saygin A. P., 2002).", 346 | "startOffset" : 93, 347 | "endOffset" : 142 348 | }, { 349 | "referenceID" : 24, 350 | "context" : "In (Steidl et al., 2004) they measured dialogue success at the turn level as", 351 | "startOffset" : 3, 352 | "endOffset" : 24 353 | }, { 354 | "referenceID" : 28, 355 | "context" : "(Webb et al., 2010) created a measure of dialogue appropriateness to determine its role in maintaining a conversation.", 356 | "startOffset" : 0, 357 | "endOffset" : 19 358 | }, { 359 | "referenceID" : 23, 360 | "context" : "Similarly, in (Sebastian et al., 2009) they developed a taxonomy of available measures for an enduser’s quality of experience for multimodel dialogue systems, some of which touch on conversational quality.", 361 | "startOffset" : 14, 362 | "endOffset" : 38 363 | }, { 364 | "referenceID" : 14, 365 | "context" : "In (Mimoun et al., 2012) the authors analyzed reasons sales chatbots fail by interviewing chatbots experts.", 366 | "startOffset" : 3, 367 | "endOffset" : 24 368 | }, { 369 | "referenceID" : 2, 370 | "context" : "In (Gnewuch et al., 2017) they studied service quality dimensions (i.", 371 | "startOffset" : 3, 372 | "endOffset" : 25 373 | }, { 374 | "referenceID" : 21, 375 | "context" : "Accurate intent detection is thus a fundamental characteristic of well-trained virtual agents, and incorrect intent analysis is reported as the leading cause of user dissatisfaction (Sarikaya, 2017).", 376 | "startOffset" : 182, 377 | "endOffset" : 198 378 | }, { 379 | "referenceID" : 11, 380 | "context" : "Such agent repetitions lead to an unnatural interaction (Klüwer, 2011).", 381 | "startOffset" : 56, 382 | "endOffset" : 70 383 | }, { 384 | "referenceID" : 21, 385 | "context" : "satisfaction (Sarikaya, 2017), and cascade to an egregious conversation (as discussed below in Section 3.", 386 | "startOffset" : 13, 387 | "endOffset" : 29 388 | }, { 389 | "referenceID" : 29, 390 | "context" : "An ineffective interaction requires the expenditure of relatively large effort from the customer with little return on the investment (Zeithaml et al., 1990; Mimoun et al., 2012).", 391 | "startOffset" : 134, 392 | "endOffset" : 178 393 | }, { 394 | "referenceID" : 14, 395 | "context" : "An ineffective interaction requires the expenditure of relatively large effort from the customer with little return on the investment (Zeithaml et al., 1990; Mimoun et al., 2012).", 396 | "startOffset" : 134, 397 | "endOffset" : 178 398 | }, { 399 | "referenceID" : 20, 400 | "context" : "This can be caused by different reasons as described in (Sano et al., 2017).", 401 | "startOffset" : 56, 402 | "endOffset" : 75 403 | }, { 404 | "referenceID" : 16, 405 | "context" : "The customer’s emotional state during the conversation is known to correlate with the conversation’s quality (Oliver, 2014).", 406 | "startOffset" : 109, 407 | "endOffset" : 123 408 | }, { 409 | "referenceID" : 18, 410 | "context" : "Usually, high positive emotions capture different styles of “thanking the agent”, or indicate that the customer is somewhat satisfied (Rychalski and Hudson, 2017), thus, the conversation is less likely to become egregious.", 411 | "startOffset" : 134, 412 | "endOffset" : 162 413 | }, { 414 | "referenceID" : 0, 415 | "context" : "Moreover, even if there are human agents, they might not be available at all times, and thus, a rejection of such a request is sometimes reasonable, but might still lead to customer frustration 
(Amsel, 1992).", 416 | "startOffset" : 194, 417 | "endOffset" : 207 418 | }, { 419 | "referenceID" : 5, 420 | "context" : "This model was implemented using state-of-the-art textual features as in (Herzig et al., 2017).", 421 | "startOffset" : 73, 422 | "endOffset" : 94 423 | }, { 424 | "referenceID" : 5, 425 | "context" : "In (Herzig et al., 2017) emotions are detected from text, which", 426 | "startOffset" : 3, 427 | "endOffset" : 24 428 | }, { 429 | "referenceID" : 8, 430 | "context" : "ogy presented in (Jovita et al., 2015) claiming that the best answer given by the system has the highest similarity value between the customer turn and the agent answer.", 431 | "startOffset" : 17, 432 | "endOffset" : 38 433 | } ], 434 | "year" : 2018, 435 | "abstractText" : "Virtual agents are becoming a prominent channel of interaction in customer service. Not all customer interactions are smooth, however, and some can become almost comically bad. In such instances, a human agent might need to step in and salvage the conversation. Detecting bad conversations is important since disappointing customer service may threaten customer loyalty and impact revenue. In this paper, we outline an approach to detecting such egregious conversations, using behavioral cues from the user, patterns in agent responses, and useragent interaction. Using logs of two commercial systems, we show that using these features improves the detection F1-score by around 20% over using textual features alone. In addition, we show that those features are common across two quite different domains and, arguably, universal.", 436 | "creator" : "LaTeX with hyperref package" 437 | } -------------------------------------------------------------------------------- /example/transcript/Detecting Egregious Conversations between Customers and Virtual Agents.txt: -------------------------------------------------------------------------------- 1 | Okay, high I'm. 2 | My name is much more the show that the death present to you on, I will work on detecting we conversations are between customers in the virtual all agents are this is the joint work between between IBM on such loving the high fi in in and yorktown. 3 | Okay, one second. 4 | Okay, so sorry about that. 5 | Okay, so I think ours this stuff with we did the phoenician, if what is egregious so egregious these some by definition, outstandingly bad or shocking. 6 | So as we all heard the known, the interest of using virtual agent is increasing and not specifically there are some prediction of by twenty twenty that eighty percent of the businesses will have virtual agents on. 7 | And even more specifically for look on the customer care. 8 | The main that the eighty five percent of the interactions will be on of provide all powered by virtual agents, which is, of course, good, the on for us. 9 | However, from either I will own experience other, we know that those will to virtual agents might behave really bad, are which can lead to loss in customer loyalty in even in some ways the use. 10 | Okay, sir on this is the real example of conversation with we haven't, we deal with, let me just take you through it. 11 | Okay, so we have the customer on the left and the agents on the right, the customer is asking for quotes to the travel and wants to the to know some details on the agent kind of understand in of telling the too you can consider by too purchased the ticket on the customer is not interested, no, I don't want to buy it. 
12 | I of I want to know the the details, the agents actually missing gifts, and I think that the already wants to go to the next online lyor in process and to rent the cop, not the customer asking of you will of your rule person, the just response I did you develop a since assistant of trained to answer questions about drivers. 13 | You can get ask me any question you have the customer is trying to start getting a little bit annoyed by all these ask you who specific question in got some them on and sell, and then the agent, not picking it up at all. 14 | And the answering with I'm not trained on that yet I'm still learning our you may want to forget the phase of question at that time. 15 | The customer is thing frustrated. 16 | The anger. 17 | Well, both of them this the this is pointless can to talk to a real life person. 18 | And then the on both and answer with that, we don't currently have live agents, the checked with online. 19 | Okay, so this is those of what we define as egregious conversations. 20 | Okay, so of course, the bunch to well will the top elected through our book festal works the todd at to looking on the computer complementary problems and try to maximize the is the such data does some previous work on coming from the is out of mine on those systems, the data use dialogue logo in the very specific setting are on the late. 21 | The late, the stood those the two thousand and seventeen does somewhat the thought of trying to study what other reason for customers rephrasing and how the different reasons affect the satisfaction and dissatisfaction then finally, the also will looking on dialog break the break downs, those the walker trying to on all torrents level, try to understand where the each utterance is a break down of the data or not other, they doing the or on japanese, too chat. 22 | Okay, so what we are dealing with so I would like to sit down extracted from too. 23 | We lead a companies are the provide the them as customer support to using virtual agents, the to company the using of similar on the line, and but on those will totally too different domains. 24 | And the each one of the company as its own business logic, of course, arm for each of the system we extract the that thousand conversations are randomly, and you can see here the local distribution of the conversation then, and the never do see that the conversation on not from company a and this much as twice longer, then company be okay, so recall the thought to try on is to detect the those group this conversation. 25 | So we treat these problem as an the binary classification to problem with too target classes, egregious the awning gregis on the input to for the for the classifieds, basically the complete conversation. 26 | So the classification is done only at the end of the conversation with struck dot, three different feature sets for each of conversation feature or coming from the agent response from the customer input in some interaction feature between the customer on the agents. 27 | And we also low each feature to be a context while which basically is whether we count to where its appeal in our conversation or on. 28 | Okay. 29 | So all point few of the features are for the complete if to can just a check of the paper. 30 | So I'll I'll I'll, begin with some agents response. 
31 | So on the first to the to week to ease them with, they called repeating the sponson analysis, which is basically aims to find how many time the agents keep from our rephrasing in saying be if they're the same response will similarly spahn's in order to calculate the sauna will on representing each sentence as the of the leveraging of the will and and buildings of of the sentence. 32 | And then we are using cause in in similarity too find similar a similar sentences on the set up feature. 33 | Second feature is what we call unsupported intent analysis, which basically means that on the ball, the they didn't use, not supporting these intent and usually can answer so with something like the like with so I'm, not trained on the or something similar. 34 | Okay, the next those that the to feature the the coming from the customers site in here on point of of done. 35 | So from the customer we may want to could on some behavioral cues on the first family's from the emotion analysis in as you saw in the example earlier in the real example, and as was discussed, and actually in the morning in the all emotion is really important in the seems to be one of the causes that lead to can lead to egregious conversation. 36 | So we were looking of different behavior of the emotion were looking on big online, the emotions, the for example, this is the max negative emotions, but will also looking on variation of too all the conversation, so we will doing some ever doing going on now the emotions what the conversation and compared it to some pick on in specific utterance, are we done our looking on some rephrasing analysis, so this is it equivalent to what they say before. 37 | So here we trying to capture to how many times what of the efforts that the customer is trying to rephrase on again and again, until the agents, well, hopefully understand or not also another feature is the asking for human agents, but here I want to make clear the it's, the perfectly okay to ask for you an agent, however, in some cases, it's not and one of the cases here is for example, when the customizable for human when they did, but we also the is carreon negative emotions, and this should be taken care. 38 | Okay, finally were looking on some interaction between the agent in the customer on the agent. 39 | So the first group of feature, the looking on some behavior or some inputs from the customer that on all of these phones, the something like I'm not trained on that. 40 | And here give you an example, civil the third one, what the call long sentences. 41 | So you can maybe imagine yourself starting to on write down some very long sentence, explaining some question the to have or something that you need enough that use spend quite a lot of time in a fourth in the efforts to press enter and immediately to got I'm, not turn on that this could of closely too high frustration of the customer and later on for an group, just cause the section, our we looking on, also the customer aside on some freezing analysis. 42 | The of for example, getting results of similar results by the agents with which that basically means that the customer is trying to phrase. 43 | The room is the the input, but the agent keep for getting the wrong thing, and keep of with the turn out wrong on are not to what the customer expected to I not the conversation lens is both of the feature. 
44 | Okay, so for the setting we sample full eleven hundred conversation for company a in another two hundred from our company be an it's conversation are we give it, we give you too for on HCI experts judge too using this to our guideline conversation with all exploded mute extra ordinarily bad in some ways those conversations where you like to see a human jump in and save the conversation. 45 | Okay, like a superman or something like that are the they delay delay, delay ability between the judges was high around on point of two. 46 | Okay, so for that run through the actually non for both companies around eight percent of the conversation with doug as egregious I'm. 47 | So this is our the the the the the tools, the we looking on. 48 | We also implemented of focus. 49 | Some are based on whether the compel with such due on the first model is a text based model. 50 | So we looked on a unique on by grimes on some feature, an of the lexicon for each of those of coming for those other emotion, emotional features and the other few on the for door based on looking very simple with the on the agent to response with the I'm, not trained on that, or on the customer is asking for you an agent. 51 | Finally, we implemented our class for using the s v s SVM with the the now can okay, so some results some okay, so we we look on on the f one school. 52 | We see the some now the egregious classifiers outperforming, both on bass baseline approach in around forty percent in eighty percent from the text based on. 53 | I think what is also interesting that here was up to the to look on the precision because here it seems that the at least the text based approaches can capture will send some of the egregious this using text while features, which is interesting our we then continue with the feature sets contribution analyses, which is basically heading incremental each time one of the different sets of the feature. 54 | So we starting with the agents features only then we are adding customer on finally also of of to get up, and as you can see on the gray column, adding also of the goodwill of gave us the highest quality. 55 | What is also interesting here is that the same that the features that are coming from the customer along. 56 | They are the more informative with respect to our detecting egregious this. 57 | Okay, our with the look on was the main. 58 | So remember we all we had these company be on conversations. 59 | So what we did we trying the data only using company a on data, and then we didn't do on any of the musician note tuning on for that those not and we destroyed those this very simple on on simply on company be did data. 60 | So if we look not on the of when score also as somehow expected the those was some degradation, the on nine percent, but still seems the from these, it was able to detect I'd, we this on what is also nice to seize the annoyance nice, but if you look now both on the performance of the text based now is much lower in this are used that to the fact that text feature allows the tied to the main the to the were trained on okay. 61 | So finally, we did some mom customer, the phrasing analysis, so inspired by the too works by saikai un, these sunal, we wanted so that idea was to understand the difference reasons of our customer, the phrasing in the the setting the use the heard the all systems, so they have a little bit differently stuff problems, but those problems of related to what was setting, and the idea was to turn and analyze the whole. 
62 | The do distribution is different between the egregious just to the on angry just class. 63 | So the first time I always l coming from, which is called in all basically the intent to was not detected correctly. 64 | And this means the the and agents response is very semantically file for what the customer is was expected too. 65 | Then the all on able what is coming from on is limitation from the language generation limitation, it's at him on item is the the intent section was score, but the customer was not satisfied by the. 66 | And so the two got for the agents, an an example for the that could be that the agents are going to speak very specific thing and got us may be a broad on as of that was not use he's on needs in the last elway's they'll come from dunn in support the intent, which is basically I'm, not not trained on data of these. 67 | This is not the supported by the agent. 68 | And now if you look on the distribution between the good just as the not gorgeous so on, we see that dumb much more unsupported intent will in the previous one error, and also of it. 69 | LG reduction of are heiau in the non gregis one, and I think both of them to get the on indicates that customer of tolerance too blow blames when when they get some feedback from the agent, the time he actually understand or the case on the system, some of the things that are they ask, and thus a the and conversation is not getting egregious that fast also if you look on the arm and then you also on the the same percentage in this is, of course, due to the weight, the it's, the same on the line, it's the same at an annual company. 70 | Okay, so to conclude so in this will walker are we show how we can detect egregious conversation using the feature of coming from the agent customer and on the direction on also so that the the feature on whole bust and can will can first the main in for future work consul first, we want to do it in every time. 71 | So we don't want to now we are waiting to the end of the call conversation, and we plan to do it in real time. 72 | We want to collect more data, so we can look on other on you are approaches, and in order to be useful, we also on to I integrates in some real alice's tools and explained the root cause for those egregious just conversations high thanks for the great holiday, and on the on as the foundation of our as the you lose track these so many feature from the dollar contacts. 73 | So I'm wondering which type of feature is most important lie compositional lands or big because in some data tenet is in repeating in repented of that state repeating imports may help to complete the task. 74 | So so I'm wondering which tapley feature is most important, I expected you'd be the last, but a I couldn't hear the last part of can you repeat the last also you recall you used, you have three three feature sets right, are the first ones extracted from the agent in put the segments from the customer in this decline from into action. 75 | So in each features as you have so many features right so I'm, wondering which type of features are the most important one. 76 | Okay, so actually it's not appear in the system, but we did the analysis for to get the featuring both those within the group itself with it's, not part of the work. 77 | So we can discuss the the line, but the they here. 
78 | -------------------------------------------------------------------------------- /prepare_data_for_hmm.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import re 4 | from nltk.tokenize import word_tokenize, sent_tokenize 5 | from nltk.corpus import stopwords 6 | import pandas as pd 7 | import argparse 8 | 9 | from util import files_in_dir 10 | from ArticleDataSample import CommonSectionNames 11 | 12 | 13 | class DataCreator: 14 | """ 15 | A class for converting json files of scientific papers into text files containing each sentence in a separate line. 16 | The json files are assumed to be the output of Allen-AI's science-parse which converts pdf into a json file. 17 | During conversion, we try to remove some noise (e.g. footnotes), however not all noise is removed. 18 | For sentence splitting we use NLTK's sent_tokenize which is not perfect as well. 19 | In addition to creating a paper's text file, we also create files with some information about the paper's 20 | sections - it is used by HmmArticle class. 21 | """ 22 | def __init__(self, 23 | json_folder, 24 | out_folder, 25 | glove_fname: str, 26 | vocab_size=100000): 27 | 28 | self.vocab = self.get_vocab(glove_fname, vocab_size) 29 | 30 | # workaround for some strings which are not handled correctly by sent_tokenize 31 | self.substitutions = [ 32 | ("e.g.", ""), 33 | ("i.e.", ""), 34 | ("et al.", "") 35 | ] 36 | 37 | self.forbidden_prefixes = ("proceedings", "copyright 201", "correspondence to", 'c©') 38 | 39 | # the footnote-related regular expressions below were defined assuming the papers were converted from pdf to 40 | # text using science-parse. in this case, there is no space between the footnote number and the following text 41 | 42 | # regular expression for capturing footnote at beginning of sentence: 1 or 2 digits followed by capital letter or "http" 43 | self.regexp_footnote_at_beginning = re.compile('\d{1,2}([A-Z]|http)') 44 | 45 | # the following 2 regular expressions are for capturing footnote in the middle of a sentence (it may happen, 46 | # depending on the sentence-splitter's decision) 47 | 48 | # capture 1 or 2 digits followed by a capital letter. 49 | # the letter should not be K as it is often used after digits to denote Kilo 50 | # \D*: to capture the latest possible digit followed by a letter 51 | # then capture "code" or "available". 
52 | # then capture last occurrence of "http" 53 | self.regexp_footnote_url1 = re.compile('\d{1,2}[A-JL-Z]\D*( code | available ).*(http)(?!.*(http))') 54 | 55 | # 1 or 2 digits and "http" right afterwards - a footnote starting with url 56 | self.regexp_footnote_url2 = re.compile('\d{1,2}(http)') 57 | 58 | # characters that appear in equations 59 | self.regexp_equation = re.compile('[\+=≡{}\[\]<>≤≥|∑∇∆‖√∈∠×εσλ∂αβδθµ]') 60 | 61 | # asterisk (or alike), then zero or more characters, then "equal contribution", optional 's' at the end 62 | self.regexp_equal_contribution = re.compile('(\*|∗|⇤|†).*equal contributions?') 63 | 64 | self.regexp_footnote_misc = re.compile('(\*|∗|⇤|†)(this|work|these|corresponding|the|available|code|http|both)') 65 | 66 | # input & output folders 67 | self.json_folder = json_folder 68 | self.out_folder = out_folder 69 | self.out_text_path = os.path.join(self.out_folder, 'text') 70 | os.makedirs(self.out_text_path, mode=0o775, exist_ok=False) 71 | self.out_sections_info_path = os.path.join(self.out_folder, 'sections_info') 72 | os.makedirs(self.out_sections_info_path, mode=0o775, exist_ok=False) 73 | self.out_section_per_sent_path = os.path.join(self.out_folder, 'section_per_sent') 74 | os.makedirs(self.out_section_per_sent_path, mode=0o775, exist_ok=False) 75 | 76 | self.failed_papers = [] 77 | self.footnotes = [] 78 | 79 | # extracts the vocabulary from a GloVe embedding file, and remove stop words 80 | @staticmethod 81 | def get_vocab(glove_fname, vocab_size): 82 | print("reading file: {}".format(glove_fname)) 83 | w2vec = pd.read_csv(glove_fname, header=None, sep=' ', quoting=3, encoding="ISO-8859-1") 84 | print("done") 85 | vocab = w2vec.ix[:, 0].values 86 | vocab = set(vocab[:vocab_size]) 87 | 88 | stop_words = set(stopwords.words("english")) 89 | stop_words.update({'min', 'max', 'argmin', 'agrmax', 's.t.', 'w.r.t.'}) 90 | 91 | for stop_word in stop_words: 92 | if stop_word in vocab: 93 | vocab.remove(stop_word) 94 | 95 | return vocab 96 | 97 | @staticmethod 98 | def isfloat(value): 99 | try: 100 | float(value) 101 | return True 102 | except ValueError: 103 | return False 104 | 105 | def is_footnote_at_beginning(self, sent): 106 | # match() -> find at the beginning 107 | return self.regexp_footnote_at_beginning.match(sent) is not None 108 | 109 | def is_equation(self, sent, num_words_thresh): 110 | 111 | # lower() because we use a lowercase vocab 112 | sent_tokens = word_tokenize(sent.lower()) 113 | 114 | num_words = 0 115 | for token in sent_tokens: 116 | if len(token) > 1 and token in self.vocab: 117 | num_words += 1 118 | if num_words >= num_words_thresh: 119 | # if there are enough words, the sentence will not be omitted 120 | return False 121 | 122 | # we omit the sentence if it has too few words AND it contains at least one "equation character" 123 | return self.regexp_equation.search(sent) is not None 124 | 125 | def remove_footnote_with_url_within_sent(self, sent): 126 | r = self.regexp_footnote_url1.search(sent) 127 | 128 | if r is not None: 129 | start_i, end_i = r.span() 130 | else: 131 | r = self.regexp_footnote_url2.search(sent) 132 | if r is None: 133 | # nothing found - return the sentence as is 134 | return sent 135 | else: 136 | start_i = r.span()[0] 137 | http_str = 'http' 138 | # find last occurrence of "http" (sometimes there are several) 139 | last_http_idx = sent.rfind(http_str) 140 | # last_http_idx is necessarily non-negative as we know 'http' occurs in the string 141 | end_i = last_http_idx + len(http_str) 142 | 143 | # set end_i to be 
the index of first space after the url, or end-of-string if space is not found 144 | 145 | # find the last slash (as sometimes a space appears within the url) 146 | slash_idx = sent[end_i:].rfind('/') 147 | end_i += slash_idx 148 | 149 | space_idx = sent[end_i:].find(' ') 150 | if space_idx >= 0: 151 | end_i += space_idx 152 | else: 153 | end_i = len(sent) 154 | 155 | # remove the captured footnote 156 | new_sent = sent.replace(sent[start_i:end_i], '') 157 | 158 | self.footnotes.append(f"{sent} !@!@ {new_sent}") 159 | 160 | return new_sent 161 | 162 | def has_forbidden_prefix(self, sent): 163 | return sent.lower().startswith(self.forbidden_prefixes) 164 | 165 | # if regexp matches, the start index is returned. otherwise, -1 is returned 166 | def search_footnotes(self, sent): 167 | if self.is_footnote_at_beginning(sent) or self.has_forbidden_prefix(sent): 168 | return 0 169 | 170 | r = self.regexp_equal_contribution.search(sent.lower()) 171 | if r is not None: 172 | return r.span()[0] 173 | 174 | r = self.regexp_footnote_misc.search(sent.lower()) 175 | if r is not None: 176 | return r.span()[0] 177 | 178 | return -1 179 | 180 | def filter_sentences(self, paper_sents, minimal_num_chars=30): 181 | valid_sents = [] 182 | 183 | for sent in paper_sents: 184 | if self.is_footnote_at_beginning(sent) or self.has_forbidden_prefix(sent): 185 | self.footnotes.append(sent) 186 | continue 187 | 188 | sent = self.remove_footnote_with_url_within_sent(sent) 189 | 190 | if len(sent) < minimal_num_chars: 191 | continue 192 | if self.is_equation(sent, num_words_thresh=3): 193 | continue 194 | 195 | valid_sents.append(sent) 196 | 197 | return valid_sents 198 | 199 | # returns True on success, False otherwise 200 | def prepare_textual_data(self, json_fname): 201 | with open(json_fname, encoding='utf-8') as json_file: 202 | json_data = json.load(json_file) 203 | success = self.process_paper(json_data) 204 | return success 205 | 206 | def process_section_text(self,sec_text): 207 | if len(sec_text) == 0: 208 | return sec_text 209 | 210 | # split by newline as it makes it easier to capture some unwanted text 211 | lines = sec_text.split('\n') 212 | new_lines = [] 213 | footnote_removed = False 214 | 215 | for cur_line in lines: 216 | if len(cur_line) == 0: 217 | continue 218 | 219 | footnote_i = self.search_footnotes(cur_line) 220 | if footnote_i >= 0: 221 | footnote_removed = True 222 | 223 | # check if footnote index is positive, i.e. 
it was found within the line (rather than at the beginning) 224 | # in this case we don't omit the entire line 225 | if footnote_i > 0: 226 | footnote_text = cur_line[footnote_i:] 227 | cur_line = cur_line[:footnote_i] 228 | new_lines.append(cur_line) 229 | else: 230 | # entire line is omitted 231 | footnote_text = cur_line 232 | 233 | self.footnotes.append(footnote_text) 234 | continue 235 | 236 | if footnote_removed: 237 | footnote_removed = False 238 | 239 | if len(new_lines) > 0: 240 | # handle the case where the last word of the line is split, but ScienceParse didn't recover it due to 241 | # existence of footnotes between the two parts of the split word 242 | last_line = new_lines[-1] 243 | if last_line[-1] == '-': 244 | last_line_tokens = word_tokenize(last_line.lower()) 245 | cur_line_tokens = word_tokenize(cur_line.lower()) 246 | # remove the dash from last token of last line, and try to append it to first token of current line 247 | candidate_word = last_line_tokens[-1][:-1] + cur_line_tokens[0] 248 | if candidate_word in self.vocab: 249 | # update the last line of new_lines 250 | updated_line = last_line[:-1] + cur_line 251 | new_lines[-1] = updated_line 252 | continue 253 | 254 | new_lines.append(cur_line) 255 | 256 | # concatenate all lines - the nltk sentence splitter will split this text into sentences 257 | updated_sec_text = ' '.join(new_lines) 258 | return updated_sec_text 259 | 260 | # returns True on success, False otherwise 261 | def process_paper(self, json_data): 262 | j_title = json_data['title'] 263 | if j_title is None: 264 | return False 265 | 266 | indices_dict = {CommonSectionNames.INTRO.value: [-1, -1], 267 | CommonSectionNames.RELATED.value: [-1, -1], 268 | CommonSectionNames.ACK.value: [-1, -1]} # 'section_name': [sec_start, sec_end] 269 | # print(j_title) 270 | sections = json_data['sections'] 271 | section_per_sent = [] 272 | 273 | paper_content = [] 274 | end_id = '' 275 | flag_end = False 276 | intro_flag_end = False 277 | intro_passed = False # did we pass the introduction section yet 278 | ack_section_found = False 279 | for sec in sections: 280 | if not sec['heading']: 281 | # print('myspecialtoken! 
' + sec['text']) 282 | continue 283 | 284 | sec_text = sec['text'] 285 | 286 | # workaround for some strings which are not handled correctly by sent_tokenize 287 | for subs_tuple in self.substitutions: 288 | # replace the problematic tokens with temporary substitution 289 | sec_text = sec_text.replace(subs_tuple[0], subs_tuple[1]) 290 | 291 | sec_text = self.process_section_text(sec_text) 292 | 293 | sents = sent_tokenize(sec_text) 294 | sents = self.filter_sentences(sents) 295 | 296 | section_per_sent += [sec['heading'] + '\n'] * len(sents) 297 | 298 | # parse "related work" section and subsections 299 | if "Related" in sec['heading']: 300 | indices_dict[CommonSectionNames.RELATED.value][0] = len(paper_content) 301 | indices_dict[CommonSectionNames.RELATED.value][1] = len(sents) 302 | if indices_dict[CommonSectionNames.RELATED.value][1] == 0: # means that there are sub sections 303 | flag_end = True 304 | related_sec = sec['heading'].strip().split(" ") 305 | end_id = related_sec[0] 306 | if end_id.endswith("."): 307 | end_id = end_id[:-1] 308 | # print("related sub sections: " + end_id) 309 | if flag_end: 310 | related_sec = sec['heading'].strip().split(" ") 311 | id_related = related_sec[0] 312 | if id_related.endswith("."): 313 | id_related = id_related[:-1] 314 | if self.isfloat(id_related): 315 | if float(id_related) < int(end_id) + 1: 316 | indices_dict[CommonSectionNames.RELATED.value][1] += len(sents) 317 | 318 | # parse "introduction" section and subsections 319 | if not intro_passed and ("Introduction" in sec['heading'] or sec['heading'].startswith(("1. ", "1 "))): 320 | # print(sec['heading']) 321 | indices_dict[CommonSectionNames.INTRO.value][0] = len(paper_content) 322 | indices_dict[CommonSectionNames.INTRO.value][1] = len(sents) 323 | intro_passed = True 324 | if indices_dict[CommonSectionNames.INTRO.value][1] == 0: # means that there are sub sections 325 | intro_flag_end = True 326 | intro_sec = sec['heading'].strip().split(" ") 327 | intro_end_id = intro_sec[0] 328 | if intro_end_id.endswith("."): 329 | intro_end_id = intro_end_id[:-1] 330 | # print("related sub sections: " + intro_end_id) 331 | if intro_flag_end: 332 | intro_sec = sec['heading'].strip().split(" ") 333 | id_intro = intro_sec[0] 334 | if id_intro.endswith("."): 335 | id_intro = id_intro[:-1] 336 | if self.isfloat(id_intro): 337 | if float(id_intro) < int(intro_end_id) + 1: 338 | indices_dict[CommonSectionNames.INTRO.value][1] += len(sents) 339 | 340 | # parse "acknowledgment" section and subsections 341 | if "Acknowledgment" in sec['heading'] or "Acknowledgement" in sec['heading']: 342 | ack_section_found = True 343 | indices_dict[CommonSectionNames.ACK.value][0] = len(paper_content) 344 | indices_dict[CommonSectionNames.ACK.value][1] = len(sents) 345 | 346 | for sent_i, sent in enumerate(sents): 347 | line = sent + '\n' 348 | paper_content.append(line) 349 | 350 | if len(paper_content) == 0: 351 | print("something is wrong with paper: {}".format(j_title)) 352 | return False 353 | 354 | if not ack_section_found: 355 | # in some cases, acknowledgment sentences are not in a dedicated section, but it's easy to capture them 356 | # we go back few sentences and look for a sentence starting with "Acknowledgment" 357 | total_num_sents = len(paper_content) 358 | start_idx = max(total_num_sents - 10, 0) 359 | for sent_i in range(start_idx, total_num_sents): 360 | if paper_content[sent_i].startswith("Acknowledgment") or paper_content[sent_i].startswith( 361 | "Acknowledgement"): 362 | 
indices_dict[CommonSectionNames.ACK.value][0] = sent_i 363 | indices_dict[CommonSectionNames.ACK.value][1] = total_num_sents - sent_i 364 | break 365 | 366 | # if there is intersection between Introduction and Related Work, we will not use the Related Work indices 367 | # (it happens in rare cases where Related Work is a sub-section of Introduction 368 | intro_last_idx = indices_dict[CommonSectionNames.INTRO.value][0] + indices_dict[CommonSectionNames.INTRO.value][ 369 | 1] - 1 370 | related_start_idx = indices_dict[CommonSectionNames.RELATED.value][0] 371 | if related_start_idx <= intro_last_idx: 372 | indices_dict[CommonSectionNames.RELATED.value] = [-1, -1] 373 | 374 | # update section_per_sent according to indices_dict for better section titles for the common sections 375 | for section_title in indices_dict: 376 | start_idx = indices_dict[section_title][0] 377 | num_sents = indices_dict[section_title][1] 378 | 379 | if start_idx >= 0: 380 | for idx in range(start_idx, start_idx + num_sents): 381 | section_per_sent[idx] = section_title + '\n' 382 | 383 | with open(os.path.join(self.out_sections_info_path, j_title + ".txt"), 384 | "w", encoding='utf-8') as related_out_file: 385 | def write_section(sec_name, sec_start, sec_end): 386 | if sec_start > -1: 387 | related_out_file.write("{0}\t{1}\t{2}\n".format(sec_name, sec_start, sec_end)) 388 | 389 | for section_name in CommonSectionNames: 390 | write_section(section_name.value, indices_dict[section_name.value][0], 391 | indices_dict[section_name.value][1]) 392 | 393 | with open(os.path.join(self.out_text_path, j_title + ".txt"), 394 | "w", encoding='utf-8') as out_file: 395 | out_str = ''.join(paper_content) 396 | 397 | # workaround for some strings which are not handled correctly by sent_tokenize 398 | for subs_tuple in self.substitutions: 399 | # replace the temporary substitutions back to original tokens 400 | out_str = out_str.replace(subs_tuple[1], subs_tuple[0]) 401 | 402 | out_file.write(out_str) 403 | 404 | with open(os.path.join(self.out_section_per_sent_path, j_title + ".txt"), 405 | "w", encoding='utf-8') as out_file: 406 | out_str = ''.join(section_per_sent) 407 | out_file.write(out_str) 408 | 409 | return True 410 | 411 | def save_captured_footnotes(self): 412 | out_fname = os.path.join(self.out_folder, "footnotes_log.txt") 413 | with open(out_fname, 'w') as out_file: 414 | for sent in self.footnotes: 415 | out_file.write(sent + '\n') 416 | 417 | def run(self): 418 | json_filenames = files_in_dir(self.json_folder) 419 | print("number of papers: {}".format(len(json_filenames))) 420 | 421 | for fname_i, fname in enumerate(json_filenames): 422 | print("--- paper {}: {}".format(fname_i, fname)) 423 | fname = os.path.join(self.json_folder, fname) 424 | success = self.prepare_textual_data(fname) 425 | if not success: 426 | print("FAILED: {}".format(fname)) 427 | self.failed_papers.append(fname) 428 | 429 | if len(self.failed_papers) > 0: 430 | print("FAILURE with the following papers:") 431 | for paper_fname in self.failed_papers: 432 | print(paper_fname) 433 | 434 | 435 | def main(args): 436 | data_creator = DataCreator(args.json_folder, args.out_folder, args.glove_path) 437 | data_creator.run() 438 | 439 | 440 | if __name__ == '__main__': 441 | 442 | parser = argparse.ArgumentParser( 443 | description='Given json files of scientific papers, this script creates the data which the HMM model expects' 444 | 'as input' 445 | ) 446 | parser.add_argument('--json_folder', help='folder of the json files of the papers') 447 | 
parser.add_argument('--out_folder', help='output folder') 448 | parser.add_argument('--glove_path', help='path to GloVe embedding file (GloVe format is assumed') 449 | 450 | args = parser.parse_args() 451 | main(args) 452 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | nltk>=3.4 2 | scipy>=1.2.0 3 | pandas>=0.23.4 4 | gensim>=3.7.0 5 | numpy>=1.16.0 6 | tqdm>=4.38.0 7 | -------------------------------------------------------------------------------- /summarize.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from multiprocessing import Pool 4 | import math 5 | import argparse 6 | import copy 7 | import itertools 8 | 9 | from Logger import Logger 10 | from util import print_table, files_in_dir 11 | from SummaryCreator import SummaryCreator 12 | from ArticleDataSample import ArticleDataSample 13 | from HmmArticle import HmmArticle, HmmArticleConfig, PredictedSeqInfoKey 14 | 15 | 16 | def summarize(args): 17 | col_order = PredictedSeqInfoKey.get_columns_order() 18 | failed_articles = [] 19 | 20 | articles_folder = os.path.join(args.data_folder, "text") 21 | transcript_folder = os.path.join(args.data_folder, "transcript") 22 | sections_info_folder = os.path.join(args.data_folder, "sections_info") 23 | section_per_sent_folder = os.path.join(args.data_folder, "section_per_sent") 24 | 25 | article_names = args.article_names 26 | print("number of articles: {}".format(len(article_names))) 27 | 28 | predict_enable = not args.no_predict 29 | # log only if we are in predict mode 30 | logging_enable = predict_enable 31 | 32 | for article_i, article_name in enumerate(article_names): 33 | if logging_enable: 34 | # set up log file for current article 35 | log_filename = os.path.join(args.log_folder, article_name) 36 | if os.path.isfile(log_filename): 37 | raise Exception("log file already exists: {}".format(log_filename)) 38 | 39 | logger = Logger(log_filename) 40 | sys.stdout = sys.stderr = logger 41 | print("Logging to file: {}\n".format(log_filename)) 42 | 43 | print("--- paper {}: {}\n".format(article_i, article_name)) 44 | 45 | article_fname = os.path.join(articles_folder, article_name) 46 | transcript_fname = os.path.join(transcript_folder, article_name) 47 | sections_info_fname = os.path.join(sections_info_folder, article_name) 48 | section_per_sent_fname = os.path.join(section_per_sent_folder, article_name) 49 | 50 | # remove the ".txt" extension and add numpy extension 51 | similarity_fname = article_name[:-4] + '.npy' 52 | similarity_fname = os.path.join(args.similarity_folder, similarity_fname) 53 | 54 | try: 55 | article_data_sample = ArticleDataSample(transcript_fname, 56 | article_fname, 57 | sections_info_fname, 58 | section_per_sent_fname) 59 | 60 | # prepare configuration 61 | cfg = HmmArticleConfig(args.word_embed_path, labeled_data_mode=False) 62 | cfg.similarity_fname = similarity_fname 63 | 64 | cfg.print_configuration() 65 | print("") 66 | 67 | durations_folder = os.path.join(args.base_summaries_folder, "durations") 68 | os.makedirs(durations_folder, mode=0o775, exist_ok=True) 69 | durations_fname = os.path.join(durations_folder, article_name) 70 | 71 | alignment_folder = os.path.join(args.base_summaries_folder, "alignment") 72 | os.makedirs(alignment_folder, mode=0o775, exist_ok=True) 73 | alignment_fname = os.path.join(alignment_folder, article_name) 74 | 75 | 
top_scored_sents_folder = os.path.join(args.base_summaries_folder, 76 | "top_scored_sents.num_sents_{}_thresh_{}".format(args.num_sents, 77 | args.thresh)) 78 | os.makedirs(top_scored_sents_folder, mode=0o775, exist_ok=True) 79 | top_scored_sents_fname = os.path.join(top_scored_sents_folder, article_name) 80 | 81 | if predict_enable: 82 | hmm_article = HmmArticle(article_data_sample, cfg) 83 | 84 | predicted_seq_info, log_prob = hmm_article.predict() 85 | 86 | print("log_prob = {}".format(log_prob)) 87 | 88 | print("predicted sequence info:\n") 89 | alignment_str = print_table(predicted_seq_info, col_order) 90 | with open(alignment_fname, 'w') as out_file: 91 | out_file.write(alignment_str + "\n") 92 | 93 | print("\n") 94 | 95 | hmm_article.create_durations_file(durations_fname) 96 | 97 | summary_creator = SummaryCreator(article_data_sample, 98 | durations_fname=durations_fname) 99 | 100 | if os.path.isfile(top_scored_sents_fname): 101 | print("file exists: {}".format(top_scored_sents_fname)) 102 | else: 103 | summary_creator.create_top_scored_sents_file(args.num_sents, 104 | args.thresh, 105 | top_scored_sents_fname) 106 | 107 | if predict_enable: 108 | warnings = hmm_article.get_warnings() 109 | if len(warnings) > 0: 110 | for warning in warnings: 111 | print("- {}".format(warning)) 112 | 113 | except Exception as ex: 114 | print("EXCEPTION WAS CAUGHT FOR PAPER: {}".format(article_name)) 115 | print(ex) 116 | failed_articles.append(article_name) 117 | 118 | return failed_articles 119 | 120 | 121 | def main(args): 122 | predict_enable = not args.no_predict 123 | 124 | os.makedirs(args.out_folder, mode=0o775, exist_ok=True) 125 | 126 | # take the basename and remove the extension 127 | word_embed_description = os.path.basename(args.word_embed_path)[:-4] 128 | 129 | experiment_folder = f'embed_{word_embed_description}' 130 | 131 | args.base_summaries_folder = os.path.join(args.out_folder, experiment_folder, "output") 132 | os.makedirs(args.base_summaries_folder, mode=0o775, exist_ok=(not predict_enable)) 133 | 134 | args.similarity_folder = os.path.join(args.out_folder, experiment_folder, "similarity") 135 | os.makedirs(args.similarity_folder, mode=0o775, exist_ok=True) 136 | args.log_folder = os.path.join(args.base_summaries_folder, "log") 137 | os.makedirs(args.log_folder, mode=0o775, exist_ok=(not predict_enable)) 138 | 139 | article_names = files_in_dir(os.path.join(args.data_folder, "transcript")) 140 | 141 | num_processors = args.num_processors 142 | print("num_processors: {}".format(num_processors)) 143 | 144 | if args.num_processors > 1: # multiprocessing 145 | num_articles = len(article_names) 146 | papers_per_process = math.ceil(num_articles / num_processors) 147 | args_list = [copy.copy(args) for _ in range(num_processors)] 148 | for i in range(num_processors): 149 | args_list[i].article_names = article_names[i*papers_per_process: (i+1)*papers_per_process] 150 | 151 | p = Pool(num_processors) 152 | failed_lists = p.map(summarize, args_list) 153 | 154 | # list of lists -> one list 155 | failed_list = list(itertools.chain.from_iterable(failed_lists)) 156 | 157 | else: # run on single processor 158 | args.article_names = article_names 159 | failed_list = summarize(args) 160 | 161 | num_failed = len(failed_list) 162 | if num_failed > 0: 163 | print("FAILED ARTICLES ({}):".format(num_failed)) 164 | for article_name in failed_list: 165 | print(article_name) 166 | 167 | 168 | if __name__ == '__main__': 169 | 170 | parser = argparse.ArgumentParser( 171 | description='This script applies 
the HMM to generate scores for the papers sentences, and to create summaries' 172 | ) 173 | parser.add_argument('--data_folder', 174 | help='data folder') 175 | parser.add_argument('--out_folder', 176 | help='output folder') 177 | parser.add_argument('--word_embed_path', 178 | help='path to word embedding file (both GloVe & word2vec bin-file formats are supported') 179 | parser.add_argument('--num_processors', type=int, default=1, 180 | help='number of processors (use 1 to avoid multiprocessing)') 181 | parser.add_argument('--no_predict', action='store_true', 182 | help='disable HMM prediction (relevant if you have already applied the HMM and obtained' 183 | 'sentence scores)') 184 | parser.add_argument('--num_sents', type=int, default=30, 185 | help='desired number of top-scored sentences in the generated summary. ' 186 | 'sentences will be retrieved only if their duration is at least \'thresh\', which means ' 187 | 'that the number of retrieved sentences might be smaller than \'num_sents\'') 188 | parser.add_argument('--thresh', type=int, default=1, 189 | help='duration threshold for retrieving sentences, as described in the help of \'num_sents\'') 190 | 191 | args = parser.parse_args() 192 | main(args) 193 | -------------------------------------------------------------------------------- /util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from datetime import datetime 4 | from scipy import spatial 5 | import math 6 | import textwrap 7 | 8 | 9 | def tprint(str): 10 | print('[{0}] {1}'.format(datetime.now().strftime('%d.%m|%H:%M:%S'), str)) 11 | sys.stdout.flush() 12 | return 13 | 14 | 15 | def cosine_similarity(u, v): 16 | cosine_distance = spatial.distance.cosine(u, v) 17 | assert (not math.isnan(cosine_distance)) 18 | cosine_sim = 1 - cosine_distance 19 | return cosine_sim 20 | 21 | 22 | def files_in_dir(dir_name, sort=True): 23 | """ 24 | returns a list of the files in a given directory 25 | """ 26 | file_names = [fname for fname in os.listdir(dir_name) if os.path.isfile(os.path.join(dir_name, fname))] 27 | if sort: 28 | file_names.sort() 29 | return file_names 30 | 31 | 32 | # based on: 33 | # https://stackoverflow.com/questions/17330139/python-printing-a-dictionary-as-a-horizontal-table-with-headers 34 | def print_table(rows, col_names=None, sep='\uFFFA', num_fraction_digits=None, max_col_width=30, print_en=True): 35 | """ 36 | Pretty print a list of dictionaries (rows) as a dynamically sized table. 37 | If column names (col_names) aren't specified, they will show in random order. 38 | The function prepares and returns a string representing the table, and optionally prints it, depending on print_en 39 | sep: row separator. Ex: sep='\n' on Linux. Default: dummy to not split line. 
40 | num_fraction_digits: number of fraction digits to be printed, in case of float (set to None for printing all digits) 41 | example: 42 | print_table([{'a': 123, 'bigtitle': 456, 'c': 0.0, 'split\ntitle': 7}, 43 | {'a': 'x', 'bigtitle': 'y', 'c': 'long text to be split', 'split\ntitle': 8}, 44 | {'a': '2016-11-02', 'bigtitle': 1.23, 'c': 7891231, 'split\ntitle': 9}], 45 | ['a', 'bigtitle', 'c', 'split\ntitle'], 46 | sep='\n', 47 | num_fraction_digits=1, 48 | max_col_width=10) 49 | """ 50 | if not col_names: 51 | col_names = list(rows[0].keys() if rows else []) 52 | my_list = [col_names] # 1st row = header 53 | for item in rows: 54 | if num_fraction_digits is not None: 55 | for key in item.keys(): 56 | if isinstance(item[key], float): 57 | format_str = '{:.' + str(num_fraction_digits) + 'f}' 58 | item[key] = format_str.format(item[key]) 59 | 60 | my_list.append([sep.join(textwrap.wrap(str(item[col]), max_col_width)) for col in col_names]) 61 | col_size = [max(map(len, (sep.join(col)).split(sep))) for col in zip(*my_list)] 62 | format_str = ' | '.join(["{{:<{}}}".format(i) for i in col_size]) 63 | dash_line = format_str.replace(' | ', '-+-').format(*['-' * i for i in col_size]) 64 | item = my_list.pop(0) 65 | dash_line_done = False 66 | lines = [] 67 | while my_list: 68 | if all(not i for i in item): 69 | item = my_list.pop(0) 70 | if dash_line and (sep != '\uFFFA' or not dash_line_done): 71 | lines.append(dash_line) 72 | dash_line_done = True 73 | 74 | while any(item): 75 | row = [i.split(sep, 1) for i in item] 76 | line = format_str.format(*[i[0] for i in row]) 77 | lines.append(line) 78 | item = [i[1] if len(i) > 1 else '' for i in row] 79 | 80 | out_str = '\n'.join(lines) 81 | if print_en: 82 | print(out_str) 83 | return out_str 84 | -------------------------------------------------------------------------------- /viterbi.py: -------------------------------------------------------------------------------- 1 | """ 2 | implementation of the Viterbi algorithm 3 | """ 4 | 5 | import numpy as np 6 | import operator 7 | 8 | 9 | # based on: 10 | # https://stackoverflow.com/questions/9729968/python-implementation-of-viterbi-algorithm 11 | def viterbi(start_prob, transition_prob, emission_prob, observations): 12 | """Return the best path, given an HMM model and a sequence of observations""" 13 | # A - initialise stuff 14 | n_samples = len(observations) 15 | n_states = transition_prob.shape[0] # number of states 16 | c = np.zeros(n_samples) # scale factors (necessary to prevent underflow) 17 | viterbi = np.zeros((n_states, n_samples)) # initialise viterbi table 18 | psi = np.zeros((n_states, n_samples)) # initialise the best path table 19 | best_path = np.zeros(n_samples).astype(int) # this will be your output 20 | 21 | # B- appoint initial values for viterbi and best path (bp) tables - Eq (32a-32b) 22 | viterbi[:, 0] = start_prob.T * emission_prob[:, observations[0]].reshape(n_states) 23 | c[0] = 1.0 / np.sum(viterbi[:, 0]) 24 | viterbi[:, 0] = c[0] * viterbi[:, 0] # apply the scaling factor 25 | psi[0] = 0 26 | 27 | # C- Do the iterations for viterbi and psi for time>0 until T 28 | for t in range(1, n_samples): # loop through time 29 | for s in range(0, n_states): # loop through the states @(t-1) 30 | trans_p = viterbi[:, t - 1] * transition_prob[:, s] 31 | psi[s, t], viterbi[s, t] = max(enumerate(trans_p), key=operator.itemgetter(1)) 32 | viterbi[s, t] = viterbi[s, t] * emission_prob[s, observations[t]] 33 | 34 | c[t] = 1.0 / np.sum(viterbi[:, t]) # scaling factor 35 | viterbi[:, t] = c[t] * 
viterbi[:, t] 36 | 37 | # D - Back-tracking 38 | best_path[n_samples - 1] = viterbi[:, n_samples - 1].argmax() # last state 39 | for t in range(n_samples - 1, 0, -1): # states of (last-1)th to 0th time step 40 | best_path[t - 1] = psi[best_path[t], t] 41 | 42 | return best_path 43 | -------------------------------------------------------------------------------- /w2v_utils.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from util import tprint 4 | from gensim.models import KeyedVectors 5 | 6 | 7 | def read_pretrained_w2v(pretrained_w2v_fname, is_glove): 8 | """ 9 | reads pretrained word embedding from the given file name 10 | is_glove: if True, assumes the format of GloVe text file. otherwise, word2vec bin file is assumed. 11 | in Glove case: returns a dictionary which maps a word to its vector 12 | in word2vec case: returns Word2VecKeyedVectors (of gensim) 13 | in addition, mean_vec is returned, which is the mean of all vectors (can be used for ) 14 | """ 15 | tprint("reading file: {}".format(pretrained_w2v_fname)) 16 | 17 | if is_glove: 18 | w2vec = pd.read_csv(pretrained_w2v_fname, header=None, sep=' ', quoting=3, encoding="ISO-8859-1") 19 | tprint("done") 20 | 21 | w2v_words = w2vec.iloc[:, 0].values 22 | w2v_vectors = w2vec.iloc[:, 1:].values 23 | 24 | num_words, dim = w2v_vectors.shape 25 | 26 | mean_vec = np.mean(w2v_vectors, 0) 27 | 28 | w2v = {} 29 | 30 | for word_i, word in enumerate(w2v_words): 31 | w2v[word] = w2v_vectors[word_i, :] 32 | 33 | else: 34 | w2v = KeyedVectors.load_word2vec_format(pretrained_w2v_fname, binary=True) 35 | tprint("done") 36 | 37 | num_words = len(w2v.vocab) 38 | dim = w2v.vector_size 39 | 40 | mean_vec = np.mean(w2v.syn0, 0) 41 | 42 | print("dim: {}".format(dim)) 43 | print("num_words: {}".format(num_words)) 44 | 45 | return w2v, mean_vec 46 | --------------------------------------------------------------------------------
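Editorial note on how the two entry-point scripts above fit together (this note is not one of the repository files): prepare_data_for_hmm.py is run first with --json_folder, --glove_path and --out_folder; it writes the text/, sections_info/ and section_per_sent/ folders that summarize.py later reads. summarize.py additionally expects a transcript/ subfolder (one transcript file per paper) inside its --data_folder, and takes --word_embed_path plus the optional --num_processors, --no_predict, --num_sents and --thresh arguments described in its argparse help.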
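A minimal usage sketch for the viterbi() function defined above, on a made-up 2-state, 3-symbol HMM; the probabilities and the observation sequence are invented for illustration and are not produced by any part of the pipeline:

import numpy as np
from viterbi import viterbi

# toy model: 2 hidden states, 3 observation symbols (all numbers invented for illustration)
start_prob = np.array([0.6, 0.4])            # P(state) at t = 0
transition_prob = np.array([[0.7, 0.3],      # transition_prob[i, j] = P(state j at t+1 | state i at t)
                            [0.4, 0.6]])
emission_prob = np.array([[0.5, 0.4, 0.1],   # emission_prob[state, symbol]
                          [0.1, 0.3, 0.6]])
observations = [0, 1, 2, 2]                  # indices of the observed symbols

best_path = viterbi(start_prob, transition_prob, emission_prob, observations)
print(best_path)  # for this toy input: [0 0 1 1]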
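Likewise, a small sketch of calling read_pretrained_w2v() from w2v_utils.py; the embedding file names are placeholders, and using mean_vec as a fallback for out-of-vocabulary words is only one plausible use of the returned mean vector (how HmmArticle actually uses it is not shown in this section):

from w2v_utils import read_pretrained_w2v
from util import cosine_similarity

# GloVe text format: returns a plain {word: vector} dict plus the mean of all vectors
w2v, mean_vec = read_pretrained_w2v("glove.6B.300d.txt", is_glove=True)

# word2vec binary format would return gensim KeyedVectors instead:
# w2v, mean_vec = read_pretrained_w2v("embeddings.bin", is_glove=False)

# fall back to the mean vector for words missing from the vocabulary (an assumption, see note above)
vec_a = w2v.get("paper", mean_vec)
vec_b = w2v.get("talk", mean_vec)
print(cosine_similarity(vec_a, vec_b))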