├── Dicts
│   ├── Decreasing.yml
│   ├── Inverting.yml
│   ├── Increasing.yml
│   ├── threatWords.yml
│   └── Positive.yml
├── DatasetCreator
│   ├── Threat.yml
│   ├── result.json
│   ├── MergeJsonFiles.py
│   └── SentimentAssignment.py
├── ThreatDetectionSentimentAnalysis
│   ├── Dicts
│   │   ├── Decreasing.yml
│   │   ├── Inverting.yml
│   │   ├── Increasing.yml
│   │   └── threatWords.yml
│   ├── DatasetCreator
│   │   ├── Threat.yml
│   │   ├── result.json
│   │   ├── MergeJsonFiles.py
│   │   └── SentimentAssignment.py
│   ├── Neural Network
│   │   ├── TestData.txt
│   │   ├── TestNN.py
│   │   ├── Train.py
│   │   └── threatSentiment.json
│   ├── testdata.txt
│   ├── inputparameters.txt
│   ├── testinput.txt
│   ├── Input.txt
│   ├── TrainAndTest.py
│   ├── Lexical.py
│   ├── CreateTestInput.py
│   ├── CreateInput.py
│   └── threat.json
├── Neural Network
│   ├── TestData.txt
│   ├── TestNN.py
│   ├── Train.py
│   └── threatSentiment.json
├── README.md
├── testdata.txt
├── inputparams.txt
├── testinputdata.txt
├── inputparameters.txt
├── testinput.txt
├── BigramTest.py
├── Input.txt
├── TrainAndTest.py
├── Lexical.py
├── CreateInput.py
├── CreateTestInput.py
├── threat.json
├── op.py
└── ip.py

The files under ThreatDetectionSentimentAnalysis/ are copies of the root-level
files, so each file is shown once below; the one copy whose contents differ,
ThreatDetectionSentimentAnalysis/inputparameters.txt, is reproduced alongside
the root-level version.

--------------------------------------------------------------------------------
/Dicts/Decreasing.yml:
--------------------------------------------------------------------------------
barely: [dec]
little: [dec]

--------------------------------------------------------------------------------
/DatasetCreator/Threat.yml:
--------------------------------------------------------------------------------
Attack
Suicide
Suicidal

--------------------------------------------------------------------------------
/DatasetCreator/result.json:
--------------------------------------------------------------------------------
{"Attack": "0.9", "Suicide": "0.8", "Suicidal": "0.7"}

--------------------------------------------------------------------------------
/Dicts/Inverting.yml:
--------------------------------------------------------------------------------
lack of: [inv]
not: [inv]
end of war: [inv]

--------------------------------------------------------------------------------
/Dicts/Increasing.yml:
--------------------------------------------------------------------------------
too: [inc]
very: [inc]
sorely: [inc]
strong: [inc]
more: [inc]
strongest: [inc]

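These three modifier dictionaries drive the score arithmetic in Lexical.py further
down: a word tagged `inc` doubles the score of the token that follows it, `dec`
halves it, and `inv` flips its sign. A minimal sketch of loading them, assuming
PyYAML is installed and the script is run from the repository root:

import yaml

modifiers = {}
for path in ('Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml'):
    with open(path) as fh:
        # each file maps a word or phrase to a one-element tag list, e.g. {'very': ['inc']}
        modifiers.update(yaml.safe_load(fh))

print(modifiers['very'])    # ['inc'] -> doubles the next token's score
print(modifiers['barely'])  # ['dec'] -> halves it
print(modifiers['not'])     # ['inv'] -> flips its sign
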
--------------------------------------------------------------------------------
/Neural Network/TestData.txt:
--------------------------------------------------------------------------------
I am warning I will kill you all and decapitate you
I will massacre each and every person in this place
I am very happy for your success. Stay blessed

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Final-Year-Project

Threat Detection System using a hybrid (machine learning + lexical analysis) learning approach.

• Generated lexicons by processing the data using lexical analysis.

• Used the lexicons as input to the machine-learning stage, training on them for better accuracy.

• Compared accuracy across different numbers of hidden layers.

• Neural network with backpropagation, using one hidden layer.

• Activation function: unipolar sigmoid.

--------------------------------------------------------------------------------
/DatasetCreator/MergeJsonFiles.py:
--------------------------------------------------------------------------------
import json


def merge_dicts(*dict_args):
    """Merge any number of dicts; later dicts win on duplicate keys."""
    result = {}
    for dictionary in dict_args:
        result.update(dictionary)
    return result


if __name__ == "__main__":
    # merge the four partial threat lexicons into a single output.json
    parts = []
    for name in ('result.json', 'result1.json', 'result2.json', 'result3.json'):
        with open(name) as fh:
            parts.append(json.load(fh))

    merged = merge_dicts(*parts)
    with open('output.json', 'w') as outfile:
        json.dump(merged, outfile)

--------------------------------------------------------------------------------
/DatasetCreator/SentimentAssignment.py:
--------------------------------------------------------------------------------
import json

if __name__ == "__main__":

    # interactively assign a threat level to every word listed in Threat.yml
    scores = {}
    with open("Threat.yml") as fh:
        words = fh.read().split()
    for word in words:
        print(word)
        scores[word] = input('Enter the threat level: ')

    print(json.dumps(scores))
    with open('result.json', 'w') as fp:
        json.dump(scores, fp)

    # sanity check: re-read the file and sum the assigned levels
    count = 0.0
    with open('result.json') as fh:
        output_json = json.load(fh)
    for majorkey, subdict in output_json.items():
        count += float(subdict)
    print(count)

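One behavior of merge_dicts in MergeJsonFiles.py above is worth spelling out:
because dict.update is applied left to right, a word scored in several
result*.json files keeps the value from the file merged last. A self-contained
illustration (the "Bomb" entry is hypothetical):

def merge_dicts(*dict_args):
    result = {}
    for dictionary in dict_args:
        result.update(dictionary)
    return result

a = {"Attack": "0.9"}
b = {"Attack": "0.5", "Bomb": "0.7"}  # hypothetical second lexicon
print(merge_dicts(a, b))  # {'Attack': '0.5', 'Bomb': '0.7'} -- the later file wins
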
--------------------------------------------------------------------------------
/testdata.txt:
--------------------------------------------------------------------------------
I am warning I will kill you all and decapitate you
I will massacre each and every person in this place
I am very happy for your success. Stay blessed
I will decapitate you.
I will punch your face till you cry blood.
I will pull your guts out.
Brutality now becomes my appetite.
I will smash your head, until brains seeps in
He jammed the knife up his friend's anus
You will never live again, soon your life will end
She buried her friend alive.
Homicide or suicide will be the cause of death.
Bring your daughter to the slaughter.
I will wrench the blade from your chest till your crotch.
She hacked his head off.
He cut of the limbs of the captured soldier.
I will cut you like a pig
I am happy you were not killed

--------------------------------------------------------------------------------
/inputparams.txt:
--------------------------------------------------------------------------------
(0.0, 5.181818181818182, 1.0, 3, 0.13636363636363635, 0)
(0.0, 5.444444444444445, 0.0, 2, 0.2222222222222222, 0)
(1.0, 5.594594594594595, 2.0, 3, 0.08108108108108109, 1)
(0.0, 6.08, 1.0, 4, 0.16, 1)
(1.0, 5.108695652173913, 7.0, 11, 0.2391304347826087, 1)
(0.0, 5.166666666666667, 1.0, 5, 0.11904761904761904, 0)
(2.0, 5.043478260869565, 5.0, 10, 0.21739130434782608, 1)
(4.0, 5.466666666666667, 8.0, 7, 0.23333333333333334, 1)
(1.0, 3.7, 3.0, 3, 0.3, 1)
(0.0, 6.285714285714286, 1.0, 1, 0.07142857142857142, 1)
(3.0, 5.2, 4.0, 1, 0.1, 0)
(2.0, 4.538461538461538, 4.0, 6, 0.46153846153846156, 1)
(4.0, 5.913043478260869, 13.0, 21, 0.22826086956521738, 1)
(3.0, 5.844444444444444, 9.0, 10, 0.2222222222222222, 1)
(0.0, 5.545454545454546, 0.0, 6, 0.10909090909090909, 0)

--------------------------------------------------------------------------------
/testinputdata.txt:
--------------------------------------------------------------------------------
(1.0, 4.636363636363637, 4.0, 3, 0.2727272727272727)
(1.0, 5.1, 2.0, 3, 0.3)
(-3.0, 5.111111111111111, -3.0, 1, 0.1111111111111111)
(0.0, 5.5, 1.0, 1, 0.25)
(1.0, 4.666666666666667, 1.0, 1, 0.1111111111111111)
(0.0, 4.333333333333333, 0.0, 1, 0.16666666666666666)
(1.0, 6.8, 1.0, 0, 0.0)
(0.0, 4.888888888888889, 0.0, 2, 0.2222222222222222)
(1.0, 5.0, 1.0, 1, 0.125)
(0.0, 4.9, 0.0, 2, 0.2)
(0.0, 5.6, 0.0, 0, 0.0)
(1.0, 5.222222222222222, 3.0, 4, 0.4444444444444444)
(0.0, 6.166666666666667, 1.0, 2, 0.3333333333333333)
(0.0, 5.181818181818182, 0.0, 1, 0.09090909090909091)
(0.0, 4.8, 0.0, 0, 0.0)
(0.0, 4.888888888888889, 0.0, 0, 0.0)
(-1.0, 3.5714285714285716, -1.0, 1, 0.14285714285714285)
(-2.0, 4.285714285714286, -2.0, 1, 0.14285714285714285)

--------------------------------------------------------------------------------
/inputparameters.txt:
--------------------------------------------------------------------------------
(0.0, 5.181818181818182, 1.0, 3, 0.13636363636363635, 0)
(0.0, 5.444444444444445, 0.0, 2, 0.2222222222222222, 0)
(1.0, 5.594594594594595, 2.0, 3, 0.08108108108108109, 1)
(0.0, 6.08, 1.0, 4, 0.16, 1)
(1.0, 5.108695652173913, 7.0, 11, 0.2391304347826087, 1)
(0.0, 5.166666666666667, 1.0, 5, 0.11904761904761904, 0)
(2.0, 5.043478260869565, 5.0, 10, 0.21739130434782608, 1)
(4.0, 5.466666666666667, 8.0, 7, 0.23333333333333334, 1)
(1.0, 3.7, 3.0, 3, 0.3, 1)
(0.0, 6.285714285714286, 1.0, 1, 0.07142857142857142, 1)
(3.0, 5.2, 4.0, 1, 0.1, 0)
(2.0, 4.538461538461538, 4.0, 6, 0.46153846153846156, 1)
(4.0, 5.913043478260869, 13.0, 21, 0.22826086956521738, 1)
(3.0, 5.844444444444444, 9.0, 10, 0.2222222222222222, 1)
(0.0, 5.545454545454546, 0.0, 6, 0.10909090909090909, 0)

--------------------------------------------------------------------------------
/testinput.txt:
--------------------------------------------------------------------------------
(4.636363636363637, 4.0, 1.7000000000000002, 3, 0.2727272727272727)
(5.1, 2.0, 0.6, 3, 0.3)
(5.111111111111111, -3.0, 0.0, 1, 0.1111111111111111)
(5.5, 1.0, 0.0, 1, 0.25)
(4.666666666666667, 1.0, 0.0, 1, 0.1111111111111111)
(4.333333333333333, 0.0, 0.0, 1, 0.16666666666666666)
(6.8, 1.0, 0.0, 0, 0.0)
(4.888888888888889, 0.0, 0.0, 2, 0.2222222222222222)
(5.0, 1.0, 0.1, 1, 0.125)
(4.9, 0.0, 0.0, 2, 0.2)
(5.6, 0.0, 0.0, 0, 0.0)
(5.222222222222222, 3.0, 1.7000000000000002, 4, 0.4444444444444444)
(6.166666666666667, 1.0, 0.8, 2, 0.3333333333333333)
(5.181818181818182, 0.0, 0.0, 1, 0.09090909090909091)
(4.8, 0.0, 0.0, 0, 0.0)
(4.888888888888889, 0.0, 0.0, 0, 0.0)
(3.5714285714285716, -1.0, 0.0, 1, 0.14285714285714285)
(4.285714285714286, -2.0, 0.9, 1, 0.14285714285714285)

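Each line in these parameter files is a Python tuple literal, so a reader can
be written without the manual parenthesis-stripping that TrainAndTest.py below
performs; a minimal sketch using ast.literal_eval on the root-level training
file above:

import ast

rows = []
with open('inputparameters.txt') as fh:
    for line in fh:
        line = line.strip()
        if line:
            rows.append([float(v) for v in ast.literal_eval(line)])

X = [r[:-1] for r in rows]  # five features per sentence
Y = [r[-1:] for r in rows]  # last column: expected output (1 = threat)
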
--------------------------------------------------------------------------------
/ThreatDetectionSentimentAnalysis/inputparameters.txt:
--------------------------------------------------------------------------------
(5.181818181818182, 1.0, 0.0, 3, 0.13636363636363635, 0)
(5.444444444444445, 0.0, 0.0, 2, 0.2222222222222222, 0)
(5.594594594594595, 2.0, 0.9, 3, 0.08108108108108109, 1)
(6.08, 1.0, 0.9, 4, 0.16, 1)
(5.108695652173913, 7.0, 4.1, 11, 0.2391304347826087, 1)
(5.166666666666667, 1.0, 0.1, 5, 0.11904761904761904, 0)
(5.043478260869565, 5.0, 2.8, 10, 0.21739130434782608, 1)
(5.466666666666667, 8.0, 3.3, 7, 0.23333333333333334, 1)
(3.7, 3.0, 1.0, 3, 0.3, 1)
(6.285714285714286, 1.0, 0.8, 1, 0.07142857142857142, 1)
(5.2, 4.0, 0.2, 1, 0.1, 0)
(4.538461538461538, 4.0, 1.8, 6, 0.46153846153846156, 1)
(5.913043478260869, 13.0, 4.8, 21, 0.22826086956521738, 1)
(5.844444444444444, 9.0, 2.1, 10, 0.2222222222222222, 1)
(5.545454545454546, 0.0, 0.0, 6, 0.10909090909090909, 0)

--------------------------------------------------------------------------------
/BigramTest.py:
--------------------------------------------------------------------------------
import nltk
import yaml

dict_paths = ['Dicts/threatWords.yml', 'Dicts/Positive.yml',
              'Dicts/Decreasing.yml', 'Dicts/Inverting.yml']

sentence = ("i will kill you and will blow up your house in despair "
            "in prospect in store good boy in the cards")

# load every tag dictionary, closing each file as soon as it is read
dicts = []
for path in dict_paths:
    with open(path) as dict_file:
        dicts.append(yaml.safe_load(dict_file))

tokens = nltk.word_tokenize(sentence)
print(tokens)

bigrams = [" ".join(pair) for pair in nltk.bigrams(tokens)]
print(bigrams)

trigrams = [" ".join(trio) for trio in nltk.trigrams(tokens)]
print(trigrams)


def count_tag(ngrams, tag):
    """Count the n-grams that any dictionary maps to the given tag."""
    return sum(1 for ngram in ngrams for d in dicts if ngram in d and d[ngram] == [tag])


print("ThreatCount ", count_tag(bigrams, 'threat') + count_tag(trigrams, 'threat'))
print("PositiveCount ", count_tag(bigrams, 'positive') + count_tag(trigrams, 'positive'))
print("NegativeCount ", count_tag(bigrams, 'negative') + count_tag(trigrams, 'negative'))

print('done')

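BigramTest.py and the tagger scripts below lean on NLTK's bundled models, which
are downloaded separately from the library itself. A one-time setup, assuming
network access (resource names can vary slightly across NLTK releases):

import nltk

nltk.download('punkt')                       # tokenizers/punkt, used by word_tokenize and Splitter
nltk.download('averaged_perceptron_tagger')  # used by nltk.pos_tag in Lexical.py
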
= [" ".join(trio) for trio in nltk.trigrams(tokens)] 20 | print(trigrams) 21 | 22 | bigramThreatCount = 0 23 | bigramPositiveCount = 0 24 | bigramNegativeCount = 0 25 | 26 | trioThreatCount = 0 27 | trioPositiveCount = 0 28 | trioNegativeCount = 0 29 | 30 | for bigram in bigrams: 31 | for dicto in dicts: 32 | if bigram in dicto and dicto[bigram] == ['threat']: 33 | bigramThreatCount += 1 34 | if bigram in dicto and dicto[bigram] == ['positive']: 35 | bigramPositiveCount += 1 36 | if bigram in dicto and dicto[bigram] == ['negative']: 37 | bigramNegativeCount += 1 38 | 39 | for trigram in trigrams: 40 | for dicto in dicts: 41 | if trigram in dicto and dicto[trigram] == ['threat']: 42 | trioThreatCount += 1 43 | if trigram in dicto and dicto[trigram] == ['positive']: 44 | trioPositiveCount += 1 45 | if trigram in dicto and dicto[trigram] == ['negative']: 46 | trioNegativeCount += 1 47 | 48 | print("ThreatCount ", bigramThreatCount + trioThreatCount) 49 | print("PositiveCount ", bigramPositiveCount + trioPositiveCount) 50 | print("NegativeCount ", bigramNegativeCount + trioNegativeCount) 51 | 52 | print('done') -------------------------------------------------------------------------------- /Neural Network/TestNN.py: -------------------------------------------------------------------------------- 1 | from pprint import pprint 2 | import json 3 | import csv 4 | 5 | 6 | def number_of_threat(review): 7 | output_json = json.load(open('result.json')) 8 | count = 0 9 | for word in review.split(): 10 | for majorkey, subdict in output_json.iteritems(): 11 | if word == majorkey: 12 | count+=1 13 | return count 14 | 15 | 16 | def threat_score(review): 17 | output_json = json.load(open('result.json')) 18 | count = 0.0 19 | for word in review.split(): 20 | for majorkey, subdict in output_json.iteritems(): 21 | if word == majorkey: 22 | #print(subdict) 23 | count += float(subdict) 24 | return count 25 | 26 | 27 | 28 | if __name__ == "__main__": 29 | 30 | with open ('test.txt','w') as f: 31 | with open("testdata.txt") as file: 32 | reader = csv.reader(file) 33 | for row in reader: 34 | #print(row) 35 | text = ''.join(row) 36 | new_text = text.replace(',', '') 37 | new_text = new_text.replace('.', '') 38 | new_text = new_text.replace(' .', '') 39 | #pprint(new_text) 40 | output_json = json.load(open('output.json')) 41 | threat_score = 0.0 42 | number_of_threat = 0 43 | for word in new_text.split(): 44 | for majorkey, subdict in output_json.iteritems(): 45 | if word == majorkey: 46 | #print(subdict) 47 | threat_score+=float(subdict) 48 | number_of_threat+=1 49 | #pprint(threat_score) 50 | #pprint(number_of_threat) 51 | 52 | 53 | 54 | charLength = float(len(text)) 55 | wordLength = float(len(text.split())) 56 | average = float(charLength/wordLength) 57 | #pprint('Character Length-> %d'%charLength) 58 | #pprint('Word Length-> %d'%wordLength) 59 | system = charLength/544, wordLength/92, average/6, threat_score, number_of_threat 60 | f.write(str(system)+'\n') 61 | 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /ThreatDetectionSentimentAnalysis/Neural Network/TestNN.py: -------------------------------------------------------------------------------- 1 | from pprint import pprint 2 | import json 3 | import csv 4 | 5 | 6 | def number_of_threat(review): 7 | output_json = json.load(open('result.json')) 8 | count = 0 9 | for word in review.split(): 10 | for majorkey, subdict in output_json.iteritems(): 11 | if word == majorkey: 12 | count+=1 13 | return count 14 | 
--------------------------------------------------------------------------------
/Neural Network/Train.py:
--------------------------------------------------------------------------------
import json
import csv


def count_threats(review, lexicon):
    """Number of lexicon words that appear in the review."""
    return sum(1 for word in review.split() if word in lexicon)


def threat_score(review, lexicon):
    """Sum of the lexicon scores of the words that appear in the review."""
    return sum(float(lexicon[word]) for word in review.split() if word in lexicon)


if __name__ == "__main__":

    with open('output.json') as fh:  # word -> threat level, built by MergeJsonFiles.py
        lexicon = json.load(fh)

    with open('new.txt', 'w') as f:
        with open("dicts/input.txt") as file:
            reader = csv.reader(file)
            for row in reader:
                text = ''.join(row)
                new_text = text.replace(',', '').replace('.', '')

                score = threat_score(new_text, lexicon)
                n_threats = count_threats(new_text, lexicon)

                # expected output: label as a threat once the accumulated
                # lexicon score reaches 0.8
                output = 1 if score >= 0.8 else 0

                charLength = float(len(text))
                wordLength = float(len(text.split()))
                average = charLength / wordLength
                # scale the length features toward [0, 1]; 544, 92 and 6 appear
                # to be maxima observed in the corpus
                system = (charLength / 544, wordLength / 92, average / 6, score, n_threats, output)
                f.write(str(system) + '\n')

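A worked example of the feature scaling used in Train.py and TestNN.py above,
on the second test sentence (the interpretation of 544, 92 and 6 as corpus
maxima is an assumption; they are hard-coded without comment in the original):

text = "I will massacre each and every person in this place"
char_length = float(len(text))          # 51.0
word_length = float(len(text.split()))  # 10.0
average = char_length / word_length     # 5.1 characters per word

print(char_length / 544, word_length / 92, average / 6)
# approximately 0.094 0.109 0.85 -- each lands roughly in [0, 1]
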
--------------------------------------------------------------------------------
/Input.txt:
--------------------------------------------------------------------------------
First I will call the PD and report constant harassment of her and her to others as well and in her neighborhood.
And that she is always suicidal and self harming.
I should have sent someone plenty of times, and she was sitting there self harming with clawing and beating herself with me on the phone and screaming in meltdowns and telling me how she would kill herself.
Someone paid me to kill you get spared,forty eight hours to pay five thousand dollar.If you inform the police or anybody,death is promised...email me now.
Fine then I will bring a gun to school and kill all the girls.I will I am serious.I Will do it at nine thirty Friday during first hour.I am serious I have twelve gauge shotgun and nine mm pistol I will bring and start killing everyone.
By the way ramen noodles are not made from soba or buckwheat.You are retarded and have no business when you have no idea what you are talking about .If I ever see you near my restaurant again you will be in trouble.$$
My father has 3 guns.I am planning on killing him first and putting his body in the dumpster. Then i will take the motor and driving to school and I am going in fast .I will kill hopefully at least two hundred before I kill myself.
You nasty ugly fuck face cunt.i will do more than burn your house down .I will rape ur stupid shit fuck daughters decapitate them and send you their butcher bodies.
I will rape you if I ever see u cunt.
Sometimes young boy started send me rape threats,So I have started telling their mothers.
Go and fuck yourself you fucking piece of shit cunt.
There's one way to kill you but a thousand way to kill you.
ISIS have threatened an attack on Britain so horrific it will 'turn children's hair white' as latest video warns of further outrages across the continent.The terror group has warned the UK will suffer 'the lion's share' of the slaughter it intends to wield in Europe, according to its Arabic language newspaper, al-Naba.However, in a video released on Saturday, the millitants suggest it is Spain which will come under the strongest attack as the West is forced to 'pay dearly' for having crushed Muslim rule in Andalusia - more than 500 years ago.
Meanwhile, the video released at the weekend features a long-haired jihadi, who appears to be white, who warns ISIS is going to carry out atrocities which will make the West forget the September 11 attacks against the United States, or the Paris massacre in November.
A sentence is a group of words that are put together to mean something. A sentence is the basic unit of language which expresses a complete thought. It does this by following the grammatical rules of syntax. A complete sentence has at least a subject and a main verb to state (declare) a complete thought.

--------------------------------------------------------------------------------
/TrainAndTest.py:
--------------------------------------------------------------------------------
import numpy as np


def nonlin(x, deriv=False):
    """Unipolar sigmoid activation, or its derivative given the sigmoid output."""
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))


def read_tuples(path):
    """Parse lines of the form '(a, b, c, ...)' into lists of floats."""
    list_of_lists = []
    with open(path) as f:
        for line in f:
            for ch in ['(', ')']:
                line = line.replace(ch, '')
            list_of_lists.append([float(elt.strip()) for elt in line.split(',')])
    return list_of_lists


def train():
    rows = read_tuples('inputparameters.txt')
    X = np.array([row[:-1] for row in rows])  # five input features per sentence
    Y = np.array([row[-1:] for row in rows])  # last column: expected output

    np.random.seed(1)

    syn0 = 2 * np.random.random((5, 6)) - 1  # input layer -> hidden layer (6 units)
    syn1 = 2 * np.random.random((6, 1)) - 1  # hidden layer -> output

    for i in range(100000):
        # forward pass
        l0 = X
        l1 = nonlin(np.dot(l0, syn0))
        l2 = nonlin(np.dot(l1, syn1))

        # backpropagation of the output error
        l2_error = Y - l2
        l2_delta = l2_error * nonlin(l2, deriv=True)
        l1_error = l2_delta.dot(syn1.T)
        l1_delta = l1_error * nonlin(l1, deriv=True) * .5

        syn0 += np.dot(l0.T, l1_delta)
        syn1 += np.dot(l1.T, l2_delta)

    np.set_printoptions(suppress=True)
    return syn0, syn1, l2


def test(syn0, syn1):
    X = np.array(read_tuples('testinput.txt'))
    l1 = nonlin(np.dot(X, syn0))
    l2 = nonlin(np.dot(l1, syn1))
    np.set_printoptions(suppress=True)
    return l2


if __name__ == "__main__":

    syn0, syn1, l2 = train()
    predictions = test(syn0, syn1)
    print("Output:", predictions)

    # expected output of the test data
    Y = [[1], [1], [0], [1], [1], [1], [0], [1], [0], [1], [0], [0], [1], [1], [0], [0], [1], [0]]

    errors = np.abs(np.array(Y) - predictions) * 10
    total = len(errors)
    # a prediction counts as correct when it is within 0.1 of the target
    wrong = int(np.sum(errors >= 1.0))
    print(wrong, "of", total, "misclassified")
    accuracy = (total - wrong) * 100 / total
    print("Accuracy: ", accuracy)

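The nonlin function above is the unipolar sigmoid named in the README: it
squashes any activation into (0, 1), and its derivative can be written purely
in terms of the sigmoid's own output, which is what keeps the backpropagation
deltas above so compact. A small worked example:

import numpy as np

def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)  # derivative expressed via the sigmoid output
    return 1 / (1 + np.exp(-x))

x = np.array([-2.0, 0.0, 2.0])
y = nonlin(x)
print(y)                # [0.1192... 0.5 0.8808...]
print(nonlin(y, True))  # [0.105... 0.25 0.105...] -- steepest at 0.5
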
--------------------------------------------------------------------------------
/Lexical.py:
--------------------------------------------------------------------------------
from pprint import pprint
import nltk
import yaml
import csv


class Splitter(object):

    def __init__(self):
        self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle')
        self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer()

    def split(self, text):
        """
        input format: a paragraph of text
        output format: a list of lists of words,
        e.g.: [['this', 'is', 'a', 'sentence'], ['this', 'is', 'another', 'one']]
        """
        sentences = self.nltk_splitter.tokenize(text)
        return [self.nltk_tokenizer.tokenize(sent) for sent in sentences]


class POSTagger(object):

    def pos_tag(self, sentences):
        """
        input format: list of lists of words,
        e.g.: [['this', 'is', 'a', 'sentence'], ['this', 'is', 'another', 'one']]
        output format: list of lists of tagged tokens. Each tagged token has a
        form, a lemma, and a list of tags,
        e.g.: [[('this', 'this', ['DT']), ('is', 'be', ['VB']), ('a', 'a', ['DT']), ('sentence', 'sentence', ['NN'])],
               [('this', 'this', ['DT']), ('is', 'be', ['VB']), ('another', 'another', ['DT']), ('one', 'one', ['CARD'])]]
        """
        pos = [nltk.pos_tag(sentence) for sentence in sentences]
        # adapt format: the word itself doubles as the lemma
        return [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos]


class DictionaryTagger(object):

    def __init__(self, dictionary_paths):
        dictionaries = []
        for path in dictionary_paths:
            with open(path) as dict_file:
                dictionaries.append(yaml.safe_load(dict_file))
        self.dictionary = {}
        self.max_key_size = 0
        for curr_dict in dictionaries:
            for key in curr_dict:
                if key in self.dictionary:
                    self.dictionary[key].extend(curr_dict[key])
                else:
                    self.dictionary[key] = curr_dict[key]
                self.max_key_size = max(self.max_key_size, len(key))

    def tag(self, postagged_sentences):
        return [self.tag_sentence(sentence) for sentence in postagged_sentences]

    def tag_sentence(self, sentence, tag_with_lemmas=False):
        """
        The result is only one tagging of all the possible ones.
        The resulting tagging is determined by these two priority rules:
        - longest matches have higher priority
        - search is made from left to right
        """
        tag_sentence = []
        N = len(sentence)
        if self.max_key_size == 0:
            self.max_key_size = N
        i = 0
        while i < N:
            j = min(i + self.max_key_size, N)  # avoid overflow
            tagged = False
            while j > i:
                expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower()
                expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower()
                literal = expression_lemma if tag_with_lemmas else expression_form
                if literal in self.dictionary:
                    is_single_token = j - i == 1
                    original_position = i
                    i = j
                    taggings = [tag for tag in self.dictionary[literal]]
                    tagged_expression = (expression_form, expression_lemma, taggings)
                    if is_single_token:  # conserve the token's previous taggings
                        original_token_tagging = sentence[original_position][2]
                        tagged_expression[2].extend(original_token_tagging)
                    tag_sentence.append(tagged_expression)
                    tagged = True
                else:
                    j = j - 1
            if not tagged:
                tag_sentence.append(sentence[i])
                i += 1
        return tag_sentence


def value_of(sentiment):
    return 1 if sentiment == 'threat' else 0


def sentence_score(sentence_tokens, previous_token, acum_score):
    if not sentence_tokens:
        return acum_score
    current_token = sentence_tokens[0]
    tags = current_token[2]
    token_score = sum([value_of(tag) for tag in tags])
    if previous_token is not None:
        previous_tags = previous_token[2]
        if 'inc' in previous_tags:
            token_score *= 2.0
        elif 'dec' in previous_tags:
            token_score /= 2.0
        elif 'inv' in previous_tags:
            token_score *= -1.0
    return sentence_score(sentence_tokens[1:], current_token, acum_score + token_score)


def sentiment_score(review):
    return sum([sentence_score(sentence, None, 0.0) for sentence in review])


if __name__ == "__main__":
    # build the pipeline once, outside the loop
    splitter = Splitter()
    postagger = POSTagger()
    dicttagger = DictionaryTagger(['Dicts/threatWords.yml', 'Dicts/Increasing.yml',
                                   'Dicts/Decreasing.yml', 'Dicts/Inverting.yml'])

    with open("Input.txt") as file:
        reader = csv.reader(file)
        for row in reader:
            text = ''.join(row)
            splitted_sentences = splitter.split(text)
            pos_tagged_sentences = postagger.pos_tag(splitted_sentences)
            dict_tagged_sentences = dicttagger.tag(pos_tagged_sentences)

            print("analyzing sentiment...")
            score = sentiment_score(dict_tagged_sentences)
            if score >= 2:
                pprint("Threat")
            else:
                pprint("Not Threat")

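To make the modifier rules in sentence_score concrete, here is an iterative
re-statement of the same logic on a hand-tagged token list (the tokens are
hypothetical but follow the (form, lemma, tags) shape that DictionaryTagger
produces):

def value_of(tag):
    return 1 if tag == 'threat' else 0

def score(tokens):
    total, prev_tags = 0.0, []
    for form, lemma, tags in tokens:
        s = sum(value_of(t) for t in tags)
        if 'inc' in prev_tags:
            s *= 2.0
        elif 'dec' in prev_tags:
            s /= 2.0
        elif 'inv' in prev_tags:
            s *= -1.0
        total += s
        prev_tags = tags
    return total

tokens = [('i', 'i', ['PRP']), ('will', 'will', ['MD']),
          ('very', 'very', ['inc', 'RB']), ('kill', 'kill', ['threat', 'VB'])]
print(score(tokens))  # 2.0 -> 'very' doubles 'kill', meeting the >= 2 threshold ("Threat")
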
--------------------------------------------------------------------------------
/ThreatDetectionSentimentAnalysis/CreateTestInput.py:
--------------------------------------------------------------------------------
import json
import csv
import nltk
import yaml


class Splitter(object):

    def __init__(self):
        self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle')
        self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer()

    def split(self, text):
        sentences = self.nltk_splitter.tokenize(text)
        return [self.nltk_tokenizer.tokenize(sent) for sent in sentences]


class POSTagger(object):

    def pos_tag(self, sentences):
        pos = [nltk.pos_tag(sentence) for sentence in sentences]
        # adapt format: the word itself doubles as the lemma
        return [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos]


class DictionaryTagger(object):

    def __init__(self, dictionary_paths):
        dictionaries = []
        for path in dictionary_paths:
            with open(path) as dict_file:
                dictionaries.append(yaml.safe_load(dict_file))
        self.dictionary = {}
        self.max_key_size = 0
        for curr_dict in dictionaries:
            for key in curr_dict:
                if key in self.dictionary:
                    self.dictionary[key].extend(curr_dict[key])
                else:
                    self.dictionary[key] = curr_dict[key]
                self.max_key_size = max(self.max_key_size, len(key))

    def tag(self, postagged_sentences):
        return [self.tag_sentence(sentence) for sentence in postagged_sentences]

    def tag_sentence(self, sentence, tag_with_lemmas=False):
        tag_sentence = []
        N = len(sentence)
        if self.max_key_size == 0:
            self.max_key_size = N
        i = 0
        while i < N:
            j = min(i + self.max_key_size, N)  # avoid overflow
            tagged = False
            while j > i:
                expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower()
                expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower()
                literal = expression_lemma if tag_with_lemmas else expression_form
                if literal in self.dictionary:
                    is_single_token = j - i == 1
                    original_position = i
                    i = j
                    taggings = [tag for tag in self.dictionary[literal]]
                    tagged_expression = (expression_form, expression_lemma, taggings)
                    if is_single_token:  # conserve the token's previous taggings
                        original_token_tagging = sentence[original_position][2]
                        tagged_expression[2].extend(original_token_tagging)
                    tag_sentence.append(tagged_expression)
                    tagged = True
                else:
                    j = j - 1
            if not tagged:
                tag_sentence.append(sentence[i])
                i += 1
        return tag_sentence


def value_of_threat(sentiment):
    if sentiment == 'positive': return -1
    if sentiment == 'negative': return 1
    if sentiment == 'threat': return 1
    return 0


def threatening_score(sentence_tokens, previous_token, acum_score):
    if not sentence_tokens:
        return acum_score
    current_token = sentence_tokens[0]
    tags = current_token[2]
    token_score = sum([value_of_threat(tag) for tag in tags])
    if previous_token is not None:
        previous_tags = previous_token[2]
        if 'inc' in previous_tags:
            token_score *= 2.0
        elif 'dec' in previous_tags:
            token_score /= 2.0
        elif 'inv' in previous_tags:
            token_score *= -1.0
    return threatening_score(sentence_tokens[1:], current_token, acum_score + token_score)


def threat_score(review):
    return sum([threatening_score(sentence, None, 0.0) for sentence in review])


if __name__ == "__main__":

    # build the pipeline once, outside the loop
    splitter = Splitter()
    postagger = POSTagger()
    dicttagger1 = DictionaryTagger(['Dicts/threatWords.yml', 'Dicts/Positive.yml', 'Dicts/Negative.yml',
                                    'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml'])

    with open('threat.json') as fh:  # word -> threat level lexicon
        output_json = json.load(fh)

    with open('testinput.txt', 'w') as f:
        with open("testdata.txt") as file:
            reader = csv.reader(file)
            for row in reader:
                text = ''.join(row)
                new_text = text.replace(',', '').replace('.', '')

                # accumulate score and count for every word found in threat.json
                threat_scores = 0.0
                number_of_threat = 0
                for word in new_text.split():
                    if word in output_json:
                        threat_scores += float(output_json[word])
                        number_of_threat += 1

                charLength = float(len(text))
                wordLength = float(len(text.split()))
                averages = charLength / wordLength

                # tag the words and calculate the lexical threat score
                splitted_sentences1 = splitter.split(text)
                pos_tagged_sentences1 = postagger.pos_tag(splitted_sentences1)
                dict_tagged_sentences1 = dicttagger1.tag(pos_tagged_sentences1)
                threatscore = threat_score(dict_tagged_sentences1)
                average = number_of_threat / wordLength

                # the five test features, in the same order as CreateInput.py (minus the label)
                system = (averages, threatscore, threat_scores, number_of_threat, average)
                f.write(str(system) + '\n')

None, 0.0) for sentence in review]) 114 | 115 | 116 | if __name__ == "__main__": 117 | 118 | with open ('testinput.txt','w') as f: 119 | with open("testdata.txt") as file: 120 | reader = csv.reader(file) 121 | for row in reader: 122 | #print(row) 123 | text = ''.join(row) 124 | new_text = text.replace(',', '') 125 | new_text = new_text.replace('.', '') 126 | #pprint(new_text) 127 | output_json = json.load(open('threat.json')) 128 | threat_scores = 0.0 129 | number_of_threat = 0 130 | for word in new_text.split(): 131 | for majorkey, subdict in output_json.iteritems(): 132 | if word == majorkey: 133 | #print(subdict) 134 | threat_scores+=float(subdict) 135 | number_of_threat+=1 136 | #pprint(threat_score) 137 | #pprint(number_of_threat) 138 | 139 | charLength = float(len(text)) 140 | wordLength = float(len(text.split())) 141 | averages = float(charLength/wordLength) 142 | #pprint('Character Length-> %d'%charLength) 143 | #pprint('Word Length-> %d'%wordLength) 144 | 145 | splitter = Splitter() 146 | postagger = POSTagger() 147 | 148 | dicttagger1 = DictionaryTagger([ 'Dicts/threatWords.yml','Dicts/Positive.yml', 'Dicts/Negative.yml', 149 | 'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml']) 150 | 151 | splitted_sentences1 = splitter.split(text) 152 | 153 | pos_tagged_sentences1 = postagger.pos_tag(splitted_sentences1) 154 | 155 | dict_tagged_sentences1 = dicttagger1.tag(pos_tagged_sentences1) 156 | 157 | #print("analyzing threat...") 158 | threatscore = threat_score(dict_tagged_sentences1) 159 | average = number_of_threat/wordLength 160 | #print(threatscore) 161 | 162 | system = averages , threatscore, threat_scores, number_of_threat, average 163 | f.write(str(system)+'\n') 164 | 165 | 166 | 167 | 168 | -------------------------------------------------------------------------------- /CreateInput.py: -------------------------------------------------------------------------------- 1 | from pprint import pprint 2 | import json 3 | import csv 4 | import numpy as np 5 | from pprint import pprint 6 | import nltk 7 | import yaml 8 | import sys 9 | import os 10 | import re 11 | 12 | class Splitter(object): 13 | 14 | def __init__(self): 15 | self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle') 16 | self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer() 17 | 18 | def split(self, text): 19 | sentences = self.nltk_splitter.tokenize(text) 20 | tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences] 21 | return tokenized_sentences 22 | 23 | 24 | class POSTagger(object): 25 | 26 | def __init__(self): 27 | pass 28 | 29 | def pos_tag(self, sentences): 30 | pos = [nltk.pos_tag(sentence) for sentence in sentences] 31 | #adapt format 32 | pos = [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos] 33 | return pos 34 | 35 | class DictionaryTagger(object): 36 | 37 | def __init__(self, dictionary_paths): 38 | files = [open(path, 'r') for path in dictionary_paths] 39 | dictionaries = [yaml.load(dict_file) for dict_file in files] 40 | map(lambda x: x.close(), files) 41 | self.dictionary = {} 42 | self.max_key_size = 0 43 | for curr_dict in dictionaries: 44 | for key in curr_dict: 45 | if key in self.dictionary: 46 | self.dictionary[key].extend(curr_dict[key]) 47 | else: 48 | self.dictionary[key] = curr_dict[key] 49 | self.max_key_size = max(self.max_key_size, len(key)) 50 | 51 | def tag(self, postagged_sentences): 52 | return [self.tag_sentence(sentence) for sentence in postagged_sentences] 53 | 54 | def tag_sentence(self, 
sentence, tag_with_lemmas=False): 55 | tag_sentence = [] 56 | N = len(sentence) 57 | if self.max_key_size == 0: 58 | self.max_key_size = N 59 | i = 0 60 | while (i < N): 61 | j = min(i + self.max_key_size, N) #avoid overflow 62 | tagged = False 63 | while (j > i): 64 | expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower() 65 | expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower() 66 | if tag_with_lemmas: 67 | literal = expression_lemma 68 | else: 69 | literal = expression_form 70 | if literal in self.dictionary: 71 | #self.logger.debug("found: %s" % literal) 72 | is_single_token = j - i == 1 73 | original_position = i 74 | i = j 75 | taggings = [tag for tag in self.dictionary[literal]] 76 | tagged_expression = (expression_form, expression_lemma, taggings) 77 | if is_single_token: #if the tagged literal is a single token, conserve its previous taggings: 78 | original_token_tagging = sentence[original_position][2] 79 | tagged_expression[2].extend(original_token_tagging) 80 | tag_sentence.append(tagged_expression) 81 | tagged = True 82 | else: 83 | j = j - 1 84 | if not tagged: 85 | tag_sentence.append(sentence[i]) 86 | i += 1 87 | return tag_sentence 88 | 89 | def value_of_threat(sentiment): 90 | if sentiment == 'positive': return -1 91 | if sentiment == 'negative': return 1 92 | if sentiment == 'threat': return 1 93 | return 0 94 | 95 | 96 | def threatening_score(sentence_tokens, previous_token, acum_score): 97 | if not sentence_tokens: 98 | return acum_score 99 | else: 100 | current_token = sentence_tokens[0] 101 | tags = current_token[2] 102 | token_score = sum([value_of_threat(tag) for tag in tags]) 103 | if previous_token is not None: 104 | previous_tags = previous_token[2] 105 | if 'inc' in previous_tags: 106 | token_score *= 2.0 107 | elif 'dec' in previous_tags: 108 | token_score /= 2.0 109 | elif 'inv' in previous_tags: 110 | token_score *= -1.0 111 | return threatening_score(sentence_tokens[1:], current_token, acum_score + token_score) 112 | 113 | def threat_score(review): 114 | return sum([threatening_score(sentence, None, 0.0) for sentence in review]) 115 | 116 | if __name__ == "__main__": 117 | 118 | with open ('inputparameters.txt','w') as f: #create new file for writing input parameters 119 | with open("Input.txt") as file: #read input file 120 | reader = csv.reader(file) 121 | for row in reader: 122 | #print(row) 123 | text = ''.join(row) 124 | new_text = text.replace(',', '') 125 | new_text = new_text.replace('.', '') 126 | #pprint(new_text) 127 | output_json = json.load(open('threat.json')) #read the threat.json file 128 | threat_scores = 0.0 129 | number_of_threat = 0 130 | for word in new_text.split(): #check if word is in threat.json file 131 | for majorkey, subdict in output_json.iteritems(): 132 | if word == majorkey: 133 | #print(subdict) 134 | threat_scores+=float(subdict) 135 | number_of_threat+=1 136 | #pprint(threat_score) 137 | #pprint(number_of_threat) 138 | 139 | if threat_scores >= 0.8: #assign expected output value of the input sentences 140 | output = 1 141 | else: 142 | output = 0 143 | charLength = float(len(text)) 144 | wordLength = float(len(text.split())) 145 | averages = float(charLength/wordLength) 146 | #pprint('Character Length-> %d'%charLength) 147 | #pprint('Word Length-> %d'%wordLength) 148 | 149 | """ Tag the words as threat and calulate threat score""" 150 | splitter = Splitter() 151 | postagger = POSTagger() 152 | 153 | dicttagger1 = DictionaryTagger([ 'Dicts/threatWords.yml','Dicts/Positive.yml', 
-------------------------------------------------------------------------------- /ThreatDetectionSentimentAnalysis/CreateInput.py: --------------------------------------------------------------------------------
1 | from pprint import pprint
2 | import json
3 | import csv
4 | import numpy as np
5 | 
6 | import nltk
7 | import yaml
8 | import sys
9 | import os
10 | import re
11 | 
12 | class Splitter(object):
13 | 
14 |     def __init__(self):
15 |         self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle')
16 |         self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer()
17 | 
18 |     def split(self, text):
19 |         sentences = self.nltk_splitter.tokenize(text)
20 |         tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences]
21 |         return tokenized_sentences
22 | 
23 | 
24 | class POSTagger(object):
25 | 
26 |     def __init__(self):
27 |         pass
28 | 
29 |     def pos_tag(self, sentences):
30 |         pos = [nltk.pos_tag(sentence) for sentence in sentences]
31 |         #adapt format to (word, lemma, [tags]) triples
32 |         pos = [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos]
33 |         return pos
34 | 
35 | class DictionaryTagger(object):
36 | 
37 |     def __init__(self, dictionary_paths):
38 |         files = [open(path, 'r') for path in dictionary_paths]
39 |         dictionaries = [yaml.safe_load(dict_file) for dict_file in files]   #safe_load: these are plain tag dictionaries
40 |         for dict_file in files: dict_file.close()                           #map() is lazy in Python 3, so close explicitly
41 |         self.dictionary = {}
42 |         self.max_key_size = 0
43 |         for curr_dict in dictionaries:
44 |             for key in curr_dict:
45 |                 if key in self.dictionary:
46 |                     self.dictionary[key].extend(curr_dict[key])
47 |                 else:
48 |                     self.dictionary[key] = curr_dict[key]
49 |                     self.max_key_size = max(self.max_key_size, len(key))
50 | 
51 |     def tag(self, postagged_sentences):
52 |         return [self.tag_sentence(sentence) for sentence in postagged_sentences]
53 | 
54 |     def tag_sentence(self, sentence, tag_with_lemmas=False):
55 |         tag_sentence = []
56 |         N = len(sentence)
57 |         if self.max_key_size == 0:
58 |             self.max_key_size = N
59 |         i = 0
60 |         while (i < N):
61 |             j = min(i + self.max_key_size, N) #avoid overflow
62 |             tagged = False
63 |             while (j > i):
64 |                 expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower()
65 |                 expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower()
66 |                 if tag_with_lemmas:
67 |                     literal = expression_lemma
68 |                 else:
69 |                     literal = expression_form
70 |                 if literal in self.dictionary:
71 |                     #self.logger.debug("found: %s" % literal)
72 |                     is_single_token = j - i == 1
73 |                     original_position = i
74 |                     i = j
75 |                     taggings = [tag for tag in self.dictionary[literal]]
76 |                     tagged_expression = (expression_form, expression_lemma, taggings)
77 |                     if is_single_token: #if the tagged literal is a single token, conserve its previous taggings:
78 |                         original_token_tagging = sentence[original_position][2]
79 |                         tagged_expression[2].extend(original_token_tagging)
80 |                     tag_sentence.append(tagged_expression)
81 |                     tagged = True
82 |                 else:
83 |                     j = j - 1
84 |             if not tagged:
85 |                 tag_sentence.append(sentence[i])
86 |                 i += 1
87 |         return tag_sentence
88 | 
89 | def value_of_threat(sentiment):
90 |     if sentiment == 'positive': return -1    #positive words offset the threat score
91 |     if sentiment == 'negative': return 1
92 |     if sentiment == 'threat': return 1
93 |     return 0
94 | 
95 | 
96 | def threatening_score(sentence_tokens, previous_token, acum_score):
97 |     if not sentence_tokens:
98 |         return acum_score
99 |     else:
100 |         current_token = sentence_tokens[0]
101 |         tags = current_token[2]
102 |         token_score = sum([value_of_threat(tag) for tag in tags])
103 |         if previous_token is not None:
104 |             previous_tags = previous_token[2]
105 |             if 'inc' in previous_tags:
106 |                 token_score *= 2.0
107 |             elif 'dec' in previous_tags:
108 |                 token_score /= 2.0
109 |             elif 'inv' in previous_tags:
110 |                 token_score *= -1.0
111 |         return threatening_score(sentence_tokens[1:], current_token, acum_score + token_score)
112 | 
113 | def threat_score(review):
114 |     return sum([threatening_score(sentence, None, 0.0) for sentence in review])
115 | 
116 | if __name__ == "__main__":
117 | 
118 |     with open('inputparameters.txt', 'w') as f:                        #create new file for writing the NN input parameters
119 |         with open("Input.txt") as file:                                #read input file
120 |             reader = csv.reader(file)
121 |             for row in reader:
122 |                 #print(row)
123 |                 text = ''.join(row)
124 |                 new_text = text.replace(',', '')
125 |                 new_text = new_text.replace('.', '')
126 |                 #pprint(new_text)
127 |                 with open('threat.json') as jf: output_json = json.load(jf)   #read the threat.json scores (stored as strings)
128 |                 threat_scores = 0.0
129 |                 number_of_threat = 0
130 |                 for word in new_text.split():                          #check if the word is in threat.json
131 |                     score = output_json.get(word)                      #direct dict lookup instead of scanning every key
132 |                     if score is not None:
133 |                         #print(score)
134 |                         threat_scores += float(score)
135 |                         number_of_threat += 1
136 |                 #pprint(threat_scores)
137 |                 #pprint(number_of_threat)
138 | 
139 |                 if threat_scores >= 0.8:                               #assign the expected output (label) of the input sentence
140 |                     output = 1
141 |                 else:
142 |                     output = 0
143 |                 charLength = float(len(text))
144 |                 wordLength = float(len(text.split()))
145 |                 averages = charLength / wordLength                     #average characters per word
146 |                 #pprint('Character Length-> %d' % charLength)
147 |                 #pprint('Word Length-> %d' % wordLength)
148 | 
149 |                 """Tag the words as threats and calculate the threat score"""
150 |                 splitter = Splitter()
151 |                 postagger = POSTagger()
152 | 
153 |                 dicttagger1 = DictionaryTagger(['Dicts/threatWords.yml', 'Dicts/Positive.yml', 'Dicts/Negative.yml',
154 |                                                 'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml'])
155 | 
156 |                 splitted_sentences1 = splitter.split(text)
157 | 
158 |                 pos_tagged_sentences1 = postagger.pos_tag(splitted_sentences1)
159 | 
160 |                 dict_tagged_sentences1 = dicttagger1.tag(pos_tagged_sentences1)
161 | 
162 |                 #print("analyzing threat...")
163 |                 threatscore = threat_score(dict_tagged_sentences1)
164 |                 average = number_of_threat / wordLength                #fraction of words that are threat words
165 |                 #print(threatscore)
166 |                 """form the input parameters for the NN"""
167 |                 system = averages, threatscore, threat_scores, number_of_threat, average, output
168 |                 #write the value system (features + label) to the file
169 |                 f.write(str(system) + '\n')
170 | 
171 | 
172 | 
173 | 
174 | 
-------------------------------------------------------------------------------- /CreateTestInput.py: --------------------------------------------------------------------------------
1 | from pprint import pprint
2 | import json
3 | import csv
4 | import numpy as np
5 | 
6 | import nltk
7 | import yaml
8 | import sys
9 | import os
10 | import re
11 | 
12 | class Splitter(object):
13 | 
14 |     def __init__(self):
15 |         self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle')
16 |         self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer()
17 | 
18 |     def split(self, text):
19 |         sentences = self.nltk_splitter.tokenize(text)
20 |         tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences]
21 |         return tokenized_sentences
22 | 
23 | 
24 | class POSTagger(object):
25 | 
26 |     def __init__(self):
27 |         pass
28 | 
29 |     def pos_tag(self, sentences):
30 |         pos = [nltk.pos_tag(sentence) for sentence in sentences]
31 |         #adapt format to (word, lemma, [tags]) triples
32 |         pos = [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos]
33 |         return pos
34 | 
35 | class DictionaryTagger(object):
36 | 
37 |     def __init__(self, dictionary_paths):
38 |         files = [open(path, 'r') for path in dictionary_paths]
39 |         dictionaries = [yaml.safe_load(dict_file) for dict_file in files]   #safe_load: these are plain tag dictionaries
40 |         for dict_file in files: dict_file.close()                           #map() is lazy in Python 3, so close explicitly
41 |         self.dictionary = {}
42 |         self.max_key_size = 0
43 |         for curr_dict in dictionaries:
44 |             for key in curr_dict:
45 |                 if key in self.dictionary:
46 |                     self.dictionary[key].extend(curr_dict[key])
47 |                 else:
48 |                     self.dictionary[key] = curr_dict[key]
49 |                     self.max_key_size = max(self.max_key_size, len(key))
50 | 
51 |     def tag(self, postagged_sentences):
52 |         return [self.tag_sentence(sentence) for sentence in postagged_sentences]
53 | 
54 |     def tag_sentence(self, sentence, tag_with_lemmas=False):
55 |         tag_sentence = []
56 |         N = len(sentence)
57 |         if self.max_key_size == 0:
58 |             self.max_key_size = N
59 |         i = 0
60 |         while (i < N):
61 |             j = min(i + self.max_key_size, N) #avoid overflow
62 |             tagged = False
63 |             while (j > i):
64 |                 expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower()
65 |                 expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower()
66 |                 if tag_with_lemmas:
67 |                     literal = expression_lemma
68 |                 else:
69 |                     literal = expression_form
70 |                 if literal in self.dictionary:
71 |                     #self.logger.debug("found: %s" % literal)
72 |                     is_single_token = j - i == 1
73 |                     original_position = i
74 |                     i = j
75 |                     taggings = [tag for tag in self.dictionary[literal]]
76 |                     tagged_expression = (expression_form, expression_lemma, taggings)
77 |                     if is_single_token: #if the tagged literal is a single token, conserve its previous taggings:
78 |                         original_token_tagging = sentence[original_position][2]
79 |                         tagged_expression[2].extend(original_token_tagging)
80 |                     tag_sentence.append(tagged_expression)
81 |                     tagged = True
82 |                 else:
83 |                     j = j - 1
84 |             if not tagged:
85 |                 tag_sentence.append(sentence[i])
86 |                 i += 1
87 |         return tag_sentence
88 | 
89 | def value_of(sentiment):
90 |     if sentiment == 'positive': return -1    #positive words lower the score, negative words raise it
91 |     if sentiment == 'negative': return 1
92 |     return 0
93 | 
94 | def value_of_threat(sentiment):
95 |     if sentiment == 'positive': return -1
96 |     if sentiment == 'negative': return 1
97 |     if sentiment == 'threat': return 1
98 |     return 0
99 | 
100 | def sentence_score(sentence_tokens, previous_token, acum_score):
101 |     if not sentence_tokens:
102 |         return acum_score
103 |     else:
104 |         current_token = sentence_tokens[0]
105 |         tags = current_token[2]
106 |         token_score = sum([value_of(tag) for tag in tags])
107 |         if previous_token is not None:
108 |             previous_tags = previous_token[2]
109 |             if 'inc' in previous_tags:
110 |                 token_score *= 2.0
111 |             elif 'dec' in previous_tags:
112 |                 token_score /= 2.0
113 |             elif 'inv' in previous_tags:
114 |                 token_score *= -1.0
115 |         return sentence_score(sentence_tokens[1:], current_token, acum_score + token_score)
116 | 
117 | def threatening_score(sentence_tokens, previous_token, acum_score):
118 |     if not sentence_tokens:
119 |         return acum_score
120 |     else:
121 |         current_token = sentence_tokens[0]
122 |         tags = current_token[2]
123 |         token_score = sum([value_of_threat(tag) for tag in tags])
124 |         if previous_token is not None:
125 |             previous_tags = previous_token[2]
126 |             if 'inc' in previous_tags:
127 |                 token_score *= 2.0
128 |             elif 'dec' in previous_tags:
129 |                 token_score /= 2.0
130 |             elif 'inv' in previous_tags:
131 |                 token_score *= -1.0
132 |         return threatening_score(sentence_tokens[1:], current_token, acum_score + token_score)
133 | 
134 | def threat_score(review):
135 |     return sum([threatening_score(sentence, None, 0.0) for sentence in review])
136 | def sentiment_score(review):
137 |     return sum([sentence_score(sentence, None, 0.0) for sentence in review])
138 | 
139 | 
140 | 
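#Features written by the main block below, one tuple per test sentence:
#   sentimentscore   - lexicon sentiment score from sentiment_score()
#   averages         - average characters per word
#   threatscore      - lexicon threat score from threat_score()
#   number_of_threat - count of words found in threat.json
#   average          - number_of_threat divided by the word count
#Unlike CreateInput.py, no expected-output label is appended: this script
#builds the unlabeled test set (presumably the input for Neural Network/TestNN.py).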
141 | if __name__ == "__main__":
142 | 
143 |     with open('testinput.txt', 'w') as f:
144 |         with open("testdata.txt") as file:
145 |             reader = csv.reader(file)
146 |             for row in reader:
147 |                 #print(row)
148 |                 text = ''.join(row)
149 |                 new_text = text.replace(',', '')
150 |                 new_text = new_text.replace('.', '')
151 |                 #pprint(new_text)
152 |                 with open('threat.json') as jf: output_json = json.load(jf)   #threat-word scores (stored as strings)
153 |                 threat_scores = 0.0
154 |                 number_of_threat = 0
155 |                 for word in new_text.split():
156 |                     score = output_json.get(word)                  #direct dict lookup instead of scanning every key
157 |                     if score is not None:
158 |                         #print(score)
159 |                         threat_scores += float(score)
160 |                         number_of_threat += 1
161 |                 #pprint(threat_scores)
162 |                 #pprint(number_of_threat)
163 | 
164 |                 charLength = float(len(text))
165 |                 wordLength = float(len(text.split()))
166 |                 averages = charLength / wordLength                 #average characters per word
167 |                 #pprint('Character Length-> %d' % charLength)
168 |                 #pprint('Word Length-> %d' % wordLength)
169 | 
170 |                 splitter = Splitter()
171 |                 postagger = POSTagger()
172 | 
173 |                 dicttagger = DictionaryTagger(['Dicts/threatWords.yml', 'Dicts/Positive.yml', 'Dicts/Negative.yml',
174 |                                                'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml'])
175 | 
176 | 
177 |                 splitted_sentences = splitter.split(text)
178 | 
179 |                 pos_tagged_sentences = postagger.pos_tag(splitted_sentences)
180 | 
181 |                 dict_tagged_sentences = dicttagger.tag(pos_tagged_sentences)
182 | 
183 |                 #print("analyzing sentiment...")
184 |                 sentimentscore = sentiment_score(dict_tagged_sentences)
185 |                 #print(sentimentscore)
186 |                 #the threat pass uses the same six dictionaries, so the tagging can be reused
187 |                 dicttagger1 = dicttagger
188 | 
189 |                 splitted_sentences1 = splitted_sentences
190 | 
191 |                 pos_tagged_sentences1 = pos_tagged_sentences
192 | 
193 |                 dict_tagged_sentences1 = dict_tagged_sentences
194 | 
195 |                 #print("analyzing threat...")
196 |                 threatscore = threat_score(dict_tagged_sentences1)
197 |                 average = number_of_threat / wordLength            #fraction of words that are threat words
198 |                 #print(threatscore)
199 | 
200 |                 system = sentimentscore, averages, threatscore, number_of_threat, average
201 |                 f.write(str(system) + '\n')
202 | 
203 | 
204 | 
205 | 
206 | 
-------------------------------------------------------------------------------- /threat.json: --------------------------------------------------------------------------------
1 | {"funereal": ".5", "influenza": "0.5", "bomb": "0.8", "hurricane": "0.8", "rifle": ".6", "precarious": "0.5", "matricide": "0.7",
"executing": ".8", "avalanche": "0.7", "malignant": "0.7", "hanging": "0.8", "lethal": ".7", "shot": "0.9", "baneful": "0.4", "Boko": ".8", "gangs": "0.6", "distressing": ".5", "dreadful": ".6", "suicide": ".8", "warlike": ".6", "sororicidal": "0.6", "disgusting": ".7", "crime": "0.7", "abhorrent": "0.8", "to": "0.0", "unpleasant": "0.7", "terror": ".8", "portentous": "0.5", "fatal": "0.9", "spiteful": "0.6", "hope": "0.2", "Malevolent": "0.6", "ordained": "0.2", "risk": "0.6", "torture": "0.8", "hostage": "0.8", "inoculable": "0.7", "massacre": "0.6", "keylogger": "0.6", "impregnable": "0", "predestined": "0.3", "frowning": "0.6", "ebola": "0.7", "gloomy": ".4", "awful": ".", "nasty": "0.7", "frightening": "0.7", "cautionary": "0.7", "homicidal": "0.5", "Demise": "0.6", "sunk": "0.7", "touch-and-go": "0.7", "die": ".9", "dejected": ".7", "gun": ".6", "bad": "0.8", "shots": "0.7", "typhoon": "0.8", "gunman": ".8", "relentless": "0.6", "manslaughter": "0.8", "foreordained": "0.2", "force": ".7", "disaster": "0.8", "cynical": ".5", "gunfight": "0.7", "resentful": ".6", "cyber": "0.6", "portend": "0.6", "past": "0.2", "Daesh": ".9", "irredeemable": "0.8", "desperate": "0.5", "disturbing": ".5", "unmitigable": "0.6", "kidnap": "0.7", "incurable": "0.7", "bacteria": "0.6", "army": ".6", "unrelenting": "0.5", "bomber": ".8", "inauspicious": "0.4", "ruthless": "0.8", "alarming": "0.9", "poisonous": "0.8", "poison": "0.7", "perilous": "0.6", "sour": ".5", "irrevocable": "0.7", "closed": "0", "horrible": ".6", "weird": ".3", "pistol": ".6", "presage": "0.7", "terrorism": ".8", "hairy": "0.6", "scowling": "0.4", "burn": "0.5", "explosion": "0.8", "barbaric": ".8", "slaying": "0.7", "brewing": "0.4", "killing": "1", "rapacious": ".6", "cards": "0", "hazmat": "0.7", "forthcoming": "0.3", "contamination": "0.6", "menacing": "0.7", "tremor": "0.7", "come": "0.0", "earthquake": "0.7", "momentous": "0.2", "extreme": "0.5", "trepidation": "0.6", "great": "0.1", "store": "0", "direful": "5.8", "State": ".6", "Commination": "0.6", "phishing": "0.6", "ill": "0.5", "quarrelsome": ".7", "murdered": "0.7", "creepy": ".5", "cessation": "0.6", "ill-fated": "0.6", "strong-arm": ".7", "doleful": ".5", "assault": ".8", "implacable": "0.6", "Death": "0.7", "tough": "0.4", "sulky": "0.3", "pox": "0.5", "gone": "0.2", "foreboding": "0.4", "vain": "0.4", "defiance": "0.6", "epidemic": "0.7", "hamas": "1", "baleful": "0.5", "deleterious": "0.5", "fulmination": "0.6", "loaded": "0", "cocaine": "0.7", "regicide": "0.7", "dangersome": "0.9", "terminal": "0.4", "sealed": "0.2", "fright": "0.6", "eradication": "0.6", "injure": ".8", "Hostage": "0.7", "illegal": "0.7", "condemned": "0.5", "stated": "0.1", "predesigned": "0.3", "unapproachable": "0.7", "detrimental": "0.6", "life-and-death": "0.8", "captive": "0.6", "intrusive": ".4", "fratricide": "0.7", "killed": "0.9", "wicked": "0.5", "dismay": "0.5", "on": "0", "infect": "0.6", "butchery": "0.7", "spammer": "0.6", "missile": ".5", "downhearted": "0.5", "ghoulish": ".7", "dismal": ".3", "disruptive": ".8", "Haram": ".8", "breach": "0.7", "instant": "0.2", "cunt": ".2", "shoot": "0.9", "queasy": "0.4", "H1N1": "0.5", "flu": "0.4", "foreshadowing": "0.6", "will": "0.0", "ugly": "0.7", "near": ".1", "lugubrious": ".6", "irreparable": "0.7", "internecine": "0.5", "venomous": ".7", "sombre": ".8", "imperil": "0.6", "pessimistic": ".3", "extirpation": "0.6", "sinister": "0.6", "atrocious": ".8", "impracticable": "0.6", "viperous": "0.8", "contaminate": "0.6", "is": "0.0", "meant": 
"0.0", "collision": "0.3", "pressing": "0.2", "in": "0", "crash": "0.3", "rape": ".8", "homicide": "0.8", "malware": "0.6", "terrorist": ".8", "disconsolate": "0.4", "maligned": "0.6", "agony": "0.6", "calamitous": ".8", "wound": ".3", "parlous": "0.7", "mortiferous": "0.5", "injurious": ".7", "chemical": "0.7", "tepidity": "0.6", "hot": "0", "vital": "0.5", "unfortunate": "0.7", "goner": "0.3", "repellent": "0.7", "itchy": ".4", "persuasion": "0", "panic": "0.6", "ball": "0.0", "intended": "0.6", "taliban": "1", "restraint": ".2", "insecure": "0.6", "filicide": "0.8", "vicious": "0.6", "despondent": "0.4", "hand": "0", "bullying": ".6", "abuse": ".4", "delicate": "0", "destined": "0.5", "coming": "0", "fierce": "0.6", "murdering": "0.8", "destructive": ".6", "compulsory": "0", "bleak": ".4", "malevolent": ".8", "disagreeable": "0.7", "bombers": ".8", "shotgun": ".4", "weapon": ".8", "bounces": "0.1", "fighting": ".8", "unachievable": "0.7", "thorny": "0.7", "drone": ".", "Irreconcilable": "0.5", "dread": "0.6", "detainee": "0.4", "furious": ".8", "compelled": "0.1", "premonitory": "0.6", "wind": "0", "rancorous": ".4", "jeopardous": "0.8", "vindictive": "0.6", "rocket": ".8", "peril": "0.7", "radiation": "0.8", "over": "0.7", "shaky": "0.5", "patricide": "0.9", "militiamen": ".5", "course": "0", "glum": "0.4", "kill": ".9", "victim": ".7", "horrify": "0.8", "exemplary": "0.6", "Killing": "0.7", "encroaching": ".6", "impending": "0.5", "bombing": ".9", "pendemic": "0.7", "death": ".9", "consternation": "0.8", "flood": "0.7", "hezbollah": "1", "assassination": "0.8", "rootkit": "0.6", "remorseless": "0.6", "Taliban": ".8", "pestiferous": "0.4", "trojan": "0.6", "consequential": "0.7", "mournful": ".7", "Calamitous": "0.6", "overhanging": "0.5", "monitorial": "0.5", "grievous": "0.7", "sick": "0.3", "emergency": "0.5", "looming": "0.7", "intimidation": ".8", "dark": "0.5", "assailing": ".7", "Cruel": "0.7", "predetermined": "0.3", "repulsive": "0.8", "hair-raising": ".8", "Fatal": ".8", "outbreak": "0.7", "execution": "0.8", "grim": "0.6", "serpentine": "0.8", "acute": "0.9", "Misery": "0.6", "lost": "0.6", "tsunami": "0.8", "unstable": "0.2", "averse": ".6", "virulent": "0.6", "security": "0.6", "heavy": "0.5", "dour": "0.6", "liquidation": "0.7", "creek": "0", "exposed": "0.6", "macabre": ".8", "extremism": "1", "treacherous": "0.5", "shuddersome": ".6", "martial": ".3", "hazard": "0.7", "pugnacious": ".77", "dispiriting": ".5", "meth": "0.7", "mephitic": "0.6", "savage": "0.6", "toxic": "0.8", "for": "0", "infectious": "0.6", "lockdown": "0.6", "unavoidable": "0.4", "crucial": "0.8", "discouraging": "0.5", "bellicose": ".8", "adverse": "0.6", "antipathetic": ".7", "critical": "0.9", "despair": "0.6", "evacuation": "0.6", "ruinous": ".8", "IS": ".8", "despairing": "0.3", "browbeating": ".3", "itching": ".4", "combative": ".7", "slaughter": "0.8", "intruding": ".4", "unfriendly": "0.7", "heroin": "0.7", "que": "0.1", "evil": "0.7", "dire": "0.5", "nightmarish": ".7", "obliteration": "0.8", "ominous": "0.5", "inevitable": "0.5", "scary": ".6", "hawkish": ".4", "hostile": "0.7", "constraint": ".4", "violence": "0.5", "terrible": "0.6", "important": "0", "militia": ".5", "hopeless": ".6", "threat": "0.5", "mortal": ".2", "consequence": "0.6", "jeopardise.": "0.6", "glowering": "0.6", "warning": "0.8", "angry": "0.7", "destruction": "0.8", "extinction": "0.7", "enraged": ".7", "major": "0.2", "daunting": "0.7", "unhealthy": "0.7", "burning": "0.5", "monitory": "0.6", "bound": "0.5", "invading": 
".6", "brute": "0.6", "down": "0.6", "clamorous": "0.7", "formidable": "0.7", "impossible": "0.6", "pointless": "0.7", "radioactive": "0.8", "fired": "0.7", "ISIS": ".8", "no-win": "0.8", "fated": "0.4", "noxious": "0.6", "nocuous": "0.7", "threatening": "0.7", "ricin": "0.7", "H5N1": "0.5", "uxoricide": "0.7", "militant": ".8", "ineluctable": "0.2", "attack": "0.8", "tragic": "0.9", "terrifying": ".8", "storm": "0.8", "ticklish": "0", "dynamite": "0.7", "ISIL": ".9", "war": ".7", "duress": "..8", "futile": ".5", "virus": "0.6", "murder": "0.9", "way": "0.0", "that": "0.0", "hazardous": "0.7", "hurtful": ".6", "dreary": ".6", "depressing": ".3", "sera": ".5", "plague": "0.7", "helpless": "0.5", "troubling": "0.8", "forbidding": "0.5", "gruesome": ".8", "be": "0.0", "irreversible": "0.7", "sarin": "0.7", "directed": "0.1", "salmonella": "0.5", "abduct": ".7", "twister": "0.8", "rampage": ".8", "urgent": ".0.6", "dangerous": ".5", "up": "0.1", "extermination": "0.7", "annihilation": "0.9", "future": "0", "contrary": "0.5", "dirty": "0.4", "afflictive": "0.8", "AlQaeda": ".8", "forlorn": "0.9", "ultimatum": "0.8", "jihadist": ".7", "risky": "0.5", "prospect": "0.3", "breakneck": "1", "settled": "0.1", "contentious": ".7", "horrific": "0.8", "deaths": "0.8", "woebegone": "0.6", "certain": "0", "suspicious": "0.5", "unsafe": "0.8", "marijuana": "0.5", "sad": "0.7", "antagonistic": "0.6", "Terror": "0.8", "airstrike": ".8", "at": "0", "exhortatory": "0.6", "anthrax": "0.7", "admonitory": "0.8", "shooting": "0.7", "depressive": ".3", "vulnerable": "0.6", "sullen": "0.3", "dying": "0.7", "eerie": ".6", "deadly": "0.9", "speculative": "0.7", "malicious": "0.6", "offensive": "0.8", "severe": "0.7", "narcotics": "0.6", "Islamic": ".3", "unavailing": "0.6", "useless": "0.5", "tactic": ".1", "pestilential": "0.6", "admonishing": "0.6", "woeful": "0.6", "worsening": "0.7", "doomed": "0.8", "oppressive": ".77", "tornado": "0.8", "cautioning": "0.5", "inescapable": "0.5", "chancy": "0.5", "harmful": ".6", "militants": ".8", "fell": "0.1", "stab": ".8", "infection": "0.6", "significant": "0.7", "termination": "0.6", "prisoner": "0.6", "jihadi": ".7", "odious": "0.7", "Murder": "0.8", "grave": "0.9", "carcinogenic": "0.6", "Intimidation": "0.7", "off-putting": "0.6", "inexorable": "0.4", "fighter": ".8", "bullet": ".7", "infanticide": "0.6", "touchy": "0.6", "upset": "0.6", "exigent": "0.4", "pernicious": ".7", "demoralized": "0.3", "Endemic": "0.8", "nuclear": "0.9", "serious": "0.6", "menace": "0.5", "attacking": ".8", "weighty": "0.6", "standoff": "0.6"} -------------------------------------------------------------------------------- /Neural Network/threatSentiment.json: -------------------------------------------------------------------------------- 1 | {"funereal": ".5", "influenza": "0.5", "bomb": "0.8", "hurricane": "0.8", "rifle": ".6", "precarious": "0.5", "matricide": "0.7", "executing": ".8", "avalanche": "0.7", "malignant": "0.7", "hanging": "0.8", "lethal": ".7", "shot": "0.9", "baneful": "0.4", "Boko": ".8", "gangs": "0.6", "distressing": ".5", "dreadful": ".6", "suicide": ".8", "warlike": ".6", "sororicidal": "0.6", "disgusting": ".7", "crime": "0.7", "abhorrent": "0.8", "to": "0.0", "unpleasant": "0.7", "terror": ".8", "portentous": "0.5", "fatal": "0.9", "spiteful": "0.6", "hope": "0.2", "Malevolent": "0.6", "ordained": "0.2", "risk": "0.6", "torture": "0.8", "hostage": "0.8", "inoculable": "0.7", "massacre": "0.6", "keylogger": "0.6", "impregnable": "0", "predestined": "0.3", "frowning": 
"0.6", "ebola": "0.7", "gloomy": ".4", "awful": ".", "nasty": "0.7", "frightening": "0.7", "cautionary": "0.7", "homicidal": "0.5", "Demise": "0.6", "sunk": "0.7", "touch-and-go": "0.7", "die": ".9", "dejected": ".7", "gun": ".6", "bad": "0.8", "shots": "0.7", "typhoon": "0.8", "gunman": ".8", "relentless": "0.6", "manslaughter": "0.8", "foreordained": "0.2", "force": ".7", "disaster": "0.8", "cynical": ".5", "gunfight": "0.7", "resentful": ".6", "cyber": "0.6", "portend": "0.6", "past": "0.2", "Daesh": ".9", "irredeemable": "0.8", "desperate": "0.5", "disturbing": ".5", "unmitigable": "0.6", "kidnap": "0.7", "incurable": "0.7", "bacteria": "0.6", "army": ".6", "unrelenting": "0.5", "bomber": ".8", "inauspicious": "0.4", "ruthless": "0.8", "alarming": "0.9", "poisonous": "0.8", "poison": "0.7", "perilous": "0.6", "sour": ".5", "irrevocable": "0.7", "closed": "0", "horrible": ".6", "weird": ".3", "pistol": ".6", "presage": "0.7", "terrorism": ".8", "hairy": "0.6", "scowling": "0.4", "burn": "0.5", "explosion": "0.8", "barbaric": ".8", "slaying": "0.7", "brewing": "0.4", "killing": "1", "rapacious": ".6", "cards": "0", "hazmat": "0.7", "forthcoming": "0.3", "contamination": "0.6", "menacing": "0.7", "tremor": "0.7", "come": "0.0", "earthquake": "0.7", "momentous": "0.2", "extreme": "0.5", "trepidation": "0.6", "great": "0.1", "store": "0", "direful": "5.8", "State": ".6", "Commination": "0.6", "phishing": "0.6", "ill": "0.5", "quarrelsome": ".7", "murdered": "0.7", "creepy": ".5", "cessation": "0.6", "ill-fated": "0.6", "strong-arm": ".7", "doleful": ".5", "assault": ".8", "implacable": "0.6", "Death": "0.7", "tough": "0.4", "sulky": "0.3", "pox": "0.5", "gone": "0.2", "foreboding": "0.4", "vain": "0.4", "defiance": "0.6", "epidemic": "0.7", "hamas": "1", "baleful": "0.5", "deleterious": "0.5", "fulmination": "0.6", "loaded": "0", "cocaine": "0.7", "regicide": "0.7", "dangersome": "0.9", "terminal": "0.4", "sealed": "0.2", "fright": "0.6", "eradication": "0.6", "injure": ".8", "Hostage": "0.7", "illegal": "0.7", "condemned": "0.5", "stated": "0.1", "predesigned": "0.3", "unapproachable": "0.7", "detrimental": "0.6", "life-and-death": "0.8", "captive": "0.6", "intrusive": ".4", "fratricide": "0.7", "killed": "0.9", "wicked": "0.5", "dismay": "0.5", "on": "0", "infect": "0.6", "butchery": "0.7", "spammer": "0.6", "missile": ".5", "downhearted": "0.5", "ghoulish": ".7", "dismal": ".3", "disruptive": ".8", "Haram": ".8", "breach": "0.7", "instant": "0.2", "cunt": ".2", "shoot": "0.9", "queasy": "0.4", "H1N1": "0.5", "flu": "0.4", "foreshadowing": "0.6", "will": "0.0", "ugly": "0.7", "near": ".1", "lugubrious": ".6", "irreparable": "0.7", "internecine": "0.5", "venomous": ".7", "sombre": ".8", "imperil": "0.6", "pessimistic": ".3", "extirpation": "0.6", "sinister": "0.6", "atrocious": ".8", "impracticable": "0.6", "viperous": "0.8", "contaminate": "0.6", "is": "0.0", "meant": "0.0", "collision": "0.3", "pressing": "0.2", "in": "0", "crash": "0.3", "rape": ".8", "homicide": "0.8", "malware": "0.6", "terrorist": ".8", "disconsolate": "0.4", "maligned": "0.6", "agony": "0.6", "calamitous": ".8", "wound": ".3", "parlous": "0.7", "mortiferous": "0.5", "injurious": ".7", "chemical": "0.7", "tepidity": "0.6", "hot": "0", "vital": "0.5", "unfortunate": "0.7", "goner": "0.3", "repellent": "0.7", "itchy": ".4", "persuasion": "0", "panic": "0.6", "ball": "0.0", "intended": "0.6", "taliban": "1", "restraint": ".2", "insecure": "0.6", "filicide": "0.8", "vicious": "0.6", "despondent": "0.4", "hand": "0", 
"bullying": ".6", "abuse": ".4", "delicate": "0", "destined": "0.5", "coming": "0", "fierce": "0.6", "murdering": "0.8", "destructive": ".6", "compulsory": "0", "bleak": ".4", "malevolent": ".8", "disagreeable": "0.7", "bombers": ".8", "shotgun": ".4", "weapon": ".8", "bounces": "0.1", "fighting": ".8", "unachievable": "0.7", "thorny": "0.7", "drone": ".", "Irreconcilable": "0.5", "dread": "0.6", "detainee": "0.4", "furious": ".8", "compelled": "0.1", "premonitory": "0.6", "wind": "0", "rancorous": ".4", "jeopardous": "0.8", "vindictive": "0.6", "rocket": ".8", "peril": "0.7", "radiation": "0.8", "over": "0.7", "shaky": "0.5", "patricide": "0.9", "militiamen": ".5", "course": "0", "glum": "0.4", "kill": ".9", "victim": ".7", "horrify": "0.8", "exemplary": "0.6", "Killing": "0.7", "encroaching": ".6", "impending": "0.5", "bombing": ".9", "pendemic": "0.7", "death": ".9", "consternation": "0.8", "flood": "0.7", "hezbollah": "1", "assassination": "0.8", "rootkit": "0.6", "remorseless": "0.6", "Taliban": ".8", "pestiferous": "0.4", "trojan": "0.6", "consequential": "0.7", "mournful": ".7", "Calamitous": "0.6", "overhanging": "0.5", "monitorial": "0.5", "grievous": "0.7", "sick": "0.3", "emergency": "0.5", "looming": "0.7", "intimidation": ".8", "dark": "0.5", "assailing": ".7", "Cruel": "0.7", "predetermined": "0.3", "repulsive": "0.8", "hair-raising": ".8", "Fatal": ".8", "outbreak": "0.7", "execution": "0.8", "grim": "0.6", "serpentine": "0.8", "acute": "0.9", "Misery": "0.6", "lost": "0.6", "tsunami": "0.8", "unstable": "0.2", "averse": ".6", "virulent": "0.6", "security": "0.6", "heavy": "0.5", "dour": "0.6", "liquidation": "0.7", "creek": "0", "exposed": "0.6", "macabre": ".8", "extremism": "1", "treacherous": "0.5", "shuddersome": ".6", "martial": ".3", "hazard": "0.7", "pugnacious": ".77", "dispiriting": ".5", "meth": "0.7", "mephitic": "0.6", "savage": "0.6", "toxic": "0.8", "for": "0", "infectious": "0.6", "lockdown": "0.6", "unavoidable": "0.4", "crucial": "0.8", "discouraging": "0.5", "bellicose": ".8", "adverse": "0.6", "antipathetic": ".7", "critical": "0.9", "despair": "0.6", "evacuation": "0.6", "ruinous": ".8", "IS": ".8", "despairing": "0.3", "browbeating": ".3", "itching": ".4", "combative": ".7", "slaughter": "0.8", "intruding": ".4", "unfriendly": "0.7", "heroin": "0.7", "que": "0.1", "evil": "0.7", "dire": "0.5", "nightmarish": ".7", "obliteration": "0.8", "ominous": "0.5", "inevitable": "0.5", "scary": ".6", "hawkish": ".4", "hostile": "0.7", "constraint": ".4", "violence": "0.5", "terrible": "0.6", "important": "0", "militia": ".5", "hopeless": ".6", "threat": "0.5", "mortal": ".2", "consequence": "0.6", "jeopardise.": "0.6", "glowering": "0.6", "warning": "0.8", "angry": "0.7", "destruction": "0.8", "extinction": "0.7", "enraged": ".7", "major": "0.2", "daunting": "0.7", "unhealthy": "0.7", "burning": "0.5", "monitory": "0.6", "bound": "0.5", "invading": ".6", "brute": "0.6", "down": "0.6", "clamorous": "0.7", "formidable": "0.7", "impossible": "0.6", "pointless": "0.7", "radioactive": "0.8", "fired": "0.7", "ISIS": ".8", "no-win": "0.8", "fated": "0.4", "noxious": "0.6", "nocuous": "0.7", "threatening": "0.7", "ricin": "0.7", "H5N1": "0.5", "uxoricide": "0.7", "militant": ".8", "ineluctable": "0.2", "attack": "0.8", "tragic": "0.9", "terrifying": ".8", "storm": "0.8", "ticklish": "0", "dynamite": "0.7", "ISIL": ".9", "war": ".7", "duress": "..8", "futile": ".5", "virus": "0.6", "murder": "0.9", "way": "0.0", "that": "0.0", "hazardous": "0.7", "hurtful": ".6", "dreary": 
".6", "depressing": ".3", "sera": ".5", "plague": "0.7", "helpless": "0.5", "troubling": "0.8", "forbidding": "0.5", "gruesome": ".8", "be": "0.0", "irreversible": "0.7", "sarin": "0.7", "directed": "0.1", "salmonella": "0.5", "abduct": ".7", "twister": "0.8", "rampage": ".8", "urgent": ".0.6", "dangerous": ".5", "up": "0.1", "extermination": "0.7", "annihilation": "0.9", "future": "0", "contrary": "0.5", "dirty": "0.4", "afflictive": "0.8", "AlQaeda": ".8", "forlorn": "0.9", "ultimatum": "0.8", "jihadist": ".7", "risky": "0.5", "prospect": "0.3", "breakneck": "1", "settled": "0.1", "contentious": ".7", "horrific": "0.8", "deaths": "0.8", "woebegone": "0.6", "certain": "0", "suspicious": "0.5", "unsafe": "0.8", "marijuana": "0.5", "sad": "0.7", "antagonistic": "0.6", "Terror": "0.8", "airstrike": ".8", "at": "0", "exhortatory": "0.6", "anthrax": "0.7", "admonitory": "0.8", "shooting": "0.7", "depressive": ".3", "vulnerable": "0.6", "sullen": "0.3", "dying": "0.7", "eerie": ".6", "deadly": "0.9", "speculative": "0.7", "malicious": "0.6", "offensive": "0.8", "severe": "0.7", "narcotics": "0.6", "Islamic": ".3", "unavailing": "0.6", "useless": "0.5", "tactic": ".1", "pestilential": "0.6", "admonishing": "0.6", "woeful": "0.6", "worsening": "0.7", "doomed": "0.8", "oppressive": ".77", "tornado": "0.8", "cautioning": "0.5", "inescapable": "0.5", "chancy": "0.5", "harmful": ".6", "militants": ".8", "fell": "0.1", "stab": ".8", "infection": "0.6", "significant": "0.7", "termination": "0.6", "prisoner": "0.6", "jihadi": ".7", "odious": "0.7", "Murder": "0.8", "grave": "0.9", "carcinogenic": "0.6", "Intimidation": "0.7", "off-putting": "0.6", "inexorable": "0.4", "fighter": ".8", "bullet": ".7", "infanticide": "0.6", "touchy": "0.6", "upset": "0.6", "exigent": "0.4", "pernicious": ".7", "demoralized": "0.3", "Endemic": "0.8", "nuclear": "0.9", "serious": "0.6", "menace": "0.5", "attacking": ".8", "weighty": "0.6", "standoff": "0.6"} -------------------------------------------------------------------------------- /ThreatDetectionSentimentAnalysis/threat.json: -------------------------------------------------------------------------------- 1 | {"funereal": ".5", "influenza": "0.5", "bomb": "0.8", "hurricane": "0.8", "rifle": ".6", "precarious": "0.5", "matricide": "0.7", "executing": ".8", "avalanche": "0.7", "malignant": "0.7", "hanging": "0.8", "lethal": ".7", "shot": "0.9", "baneful": "0.4", "Boko": ".8", "gangs": "0.6", "distressing": ".5", "dreadful": ".6", "suicide": ".8", "warlike": ".6", "sororicidal": "0.6", "disgusting": ".7", "crime": "0.7", "abhorrent": "0.8", "to": "0.0", "unpleasant": "0.7", "terror": ".8", "portentous": "0.5", "fatal": "0.9", "spiteful": "0.6", "hope": "0.2", "Malevolent": "0.6", "ordained": "0.2", "risk": "0.6", "torture": "0.8", "hostage": "0.8", "inoculable": "0.7", "massacre": "0.6", "keylogger": "0.6", "impregnable": "0", "predestined": "0.3", "frowning": "0.6", "ebola": "0.7", "gloomy": ".4", "awful": ".", "nasty": "0.7", "frightening": "0.7", "cautionary": "0.7", "homicidal": "0.5", "Demise": "0.6", "sunk": "0.7", "touch-and-go": "0.7", "die": ".9", "dejected": ".7", "gun": ".6", "bad": "0.8", "shots": "0.7", "typhoon": "0.8", "gunman": ".8", "relentless": "0.6", "manslaughter": "0.8", "foreordained": "0.2", "force": ".7", "disaster": "0.8", "cynical": ".5", "gunfight": "0.7", "resentful": ".6", "cyber": "0.6", "portend": "0.6", "past": "0.2", "Daesh": ".9", "irredeemable": "0.8", "desperate": "0.5", "disturbing": ".5", "unmitigable": "0.6", "kidnap": "0.7", 
"incurable": "0.7", "bacteria": "0.6", "army": ".6", "unrelenting": "0.5", "bomber": ".8", "inauspicious": "0.4", "ruthless": "0.8", "alarming": "0.9", "poisonous": "0.8", "poison": "0.7", "perilous": "0.6", "sour": ".5", "irrevocable": "0.7", "closed": "0", "horrible": ".6", "weird": ".3", "pistol": ".6", "presage": "0.7", "terrorism": ".8", "hairy": "0.6", "scowling": "0.4", "burn": "0.5", "explosion": "0.8", "barbaric": ".8", "slaying": "0.7", "brewing": "0.4", "killing": "1", "rapacious": ".6", "cards": "0", "hazmat": "0.7", "forthcoming": "0.3", "contamination": "0.6", "menacing": "0.7", "tremor": "0.7", "come": "0.0", "earthquake": "0.7", "momentous": "0.2", "extreme": "0.5", "trepidation": "0.6", "great": "0.1", "store": "0", "direful": "5.8", "State": ".6", "Commination": "0.6", "phishing": "0.6", "ill": "0.5", "quarrelsome": ".7", "murdered": "0.7", "creepy": ".5", "cessation": "0.6", "ill-fated": "0.6", "strong-arm": ".7", "doleful": ".5", "assault": ".8", "implacable": "0.6", "Death": "0.7", "tough": "0.4", "sulky": "0.3", "pox": "0.5", "gone": "0.2", "foreboding": "0.4", "vain": "0.4", "defiance": "0.6", "epidemic": "0.7", "hamas": "1", "baleful": "0.5", "deleterious": "0.5", "fulmination": "0.6", "loaded": "0", "cocaine": "0.7", "regicide": "0.7", "dangersome": "0.9", "terminal": "0.4", "sealed": "0.2", "fright": "0.6", "eradication": "0.6", "injure": ".8", "Hostage": "0.7", "illegal": "0.7", "condemned": "0.5", "stated": "0.1", "predesigned": "0.3", "unapproachable": "0.7", "detrimental": "0.6", "life-and-death": "0.8", "captive": "0.6", "intrusive": ".4", "fratricide": "0.7", "killed": "0.9", "wicked": "0.5", "dismay": "0.5", "on": "0", "infect": "0.6", "butchery": "0.7", "spammer": "0.6", "missile": ".5", "downhearted": "0.5", "ghoulish": ".7", "dismal": ".3", "disruptive": ".8", "Haram": ".8", "breach": "0.7", "instant": "0.2", "cunt": ".2", "shoot": "0.9", "queasy": "0.4", "H1N1": "0.5", "flu": "0.4", "foreshadowing": "0.6", "will": "0.0", "ugly": "0.7", "near": ".1", "lugubrious": ".6", "irreparable": "0.7", "internecine": "0.5", "venomous": ".7", "sombre": ".8", "imperil": "0.6", "pessimistic": ".3", "extirpation": "0.6", "sinister": "0.6", "atrocious": ".8", "impracticable": "0.6", "viperous": "0.8", "contaminate": "0.6", "is": "0.0", "meant": "0.0", "collision": "0.3", "pressing": "0.2", "in": "0", "crash": "0.3", "rape": ".8", "homicide": "0.8", "malware": "0.6", "terrorist": ".8", "disconsolate": "0.4", "maligned": "0.6", "agony": "0.6", "calamitous": ".8", "wound": ".3", "parlous": "0.7", "mortiferous": "0.5", "injurious": ".7", "chemical": "0.7", "tepidity": "0.6", "hot": "0", "vital": "0.5", "unfortunate": "0.7", "goner": "0.3", "repellent": "0.7", "itchy": ".4", "persuasion": "0", "panic": "0.6", "ball": "0.0", "intended": "0.6", "taliban": "1", "restraint": ".2", "insecure": "0.6", "filicide": "0.8", "vicious": "0.6", "despondent": "0.4", "hand": "0", "bullying": ".6", "abuse": ".4", "delicate": "0", "destined": "0.5", "coming": "0", "fierce": "0.6", "murdering": "0.8", "destructive": ".6", "compulsory": "0", "bleak": ".4", "malevolent": ".8", "disagreeable": "0.7", "bombers": ".8", "shotgun": ".4", "weapon": ".8", "bounces": "0.1", "fighting": ".8", "unachievable": "0.7", "thorny": "0.7", "drone": ".", "Irreconcilable": "0.5", "dread": "0.6", "detainee": "0.4", "furious": ".8", "compelled": "0.1", "premonitory": "0.6", "wind": "0", "rancorous": ".4", "jeopardous": "0.8", "vindictive": "0.6", "rocket": ".8", "peril": "0.7", "radiation": "0.8", "over": "0.7", 
"shaky": "0.5", "patricide": "0.9", "militiamen": ".5", "course": "0", "glum": "0.4", "kill": ".9", "victim": ".7", "horrify": "0.8", "exemplary": "0.6", "Killing": "0.7", "encroaching": ".6", "impending": "0.5", "bombing": ".9", "pendemic": "0.7", "death": ".9", "consternation": "0.8", "flood": "0.7", "hezbollah": "1", "assassination": "0.8", "rootkit": "0.6", "remorseless": "0.6", "Taliban": ".8", "pestiferous": "0.4", "trojan": "0.6", "consequential": "0.7", "mournful": ".7", "Calamitous": "0.6", "overhanging": "0.5", "monitorial": "0.5", "grievous": "0.7", "sick": "0.3", "emergency": "0.5", "looming": "0.7", "intimidation": ".8", "dark": "0.5", "assailing": ".7", "Cruel": "0.7", "predetermined": "0.3", "repulsive": "0.8", "hair-raising": ".8", "Fatal": ".8", "outbreak": "0.7", "execution": "0.8", "grim": "0.6", "serpentine": "0.8", "acute": "0.9", "Misery": "0.6", "lost": "0.6", "tsunami": "0.8", "unstable": "0.2", "averse": ".6", "virulent": "0.6", "security": "0.6", "heavy": "0.5", "dour": "0.6", "liquidation": "0.7", "creek": "0", "exposed": "0.6", "macabre": ".8", "extremism": "1", "treacherous": "0.5", "shuddersome": ".6", "martial": ".3", "hazard": "0.7", "pugnacious": ".77", "dispiriting": ".5", "meth": "0.7", "mephitic": "0.6", "savage": "0.6", "toxic": "0.8", "for": "0", "infectious": "0.6", "lockdown": "0.6", "unavoidable": "0.4", "crucial": "0.8", "discouraging": "0.5", "bellicose": ".8", "adverse": "0.6", "antipathetic": ".7", "critical": "0.9", "despair": "0.6", "evacuation": "0.6", "ruinous": ".8", "IS": ".8", "despairing": "0.3", "browbeating": ".3", "itching": ".4", "combative": ".7", "slaughter": "0.8", "intruding": ".4", "unfriendly": "0.7", "heroin": "0.7", "que": "0.1", "evil": "0.7", "dire": "0.5", "nightmarish": ".7", "obliteration": "0.8", "ominous": "0.5", "inevitable": "0.5", "scary": ".6", "hawkish": ".4", "hostile": "0.7", "constraint": ".4", "violence": "0.5", "terrible": "0.6", "important": "0", "militia": ".5", "hopeless": ".6", "threat": "0.5", "mortal": ".2", "consequence": "0.6", "jeopardise.": "0.6", "glowering": "0.6", "warning": "0.8", "angry": "0.7", "destruction": "0.8", "extinction": "0.7", "enraged": ".7", "major": "0.2", "daunting": "0.7", "unhealthy": "0.7", "burning": "0.5", "monitory": "0.6", "bound": "0.5", "invading": ".6", "brute": "0.6", "down": "0.6", "clamorous": "0.7", "formidable": "0.7", "impossible": "0.6", "pointless": "0.7", "radioactive": "0.8", "fired": "0.7", "ISIS": ".8", "no-win": "0.8", "fated": "0.4", "noxious": "0.6", "nocuous": "0.7", "threatening": "0.7", "ricin": "0.7", "H5N1": "0.5", "uxoricide": "0.7", "militant": ".8", "ineluctable": "0.2", "attack": "0.8", "tragic": "0.9", "terrifying": ".8", "storm": "0.8", "ticklish": "0", "dynamite": "0.7", "ISIL": ".9", "war": ".7", "duress": "..8", "futile": ".5", "virus": "0.6", "murder": "0.9", "way": "0.0", "that": "0.0", "hazardous": "0.7", "hurtful": ".6", "dreary": ".6", "depressing": ".3", "sera": ".5", "plague": "0.7", "helpless": "0.5", "troubling": "0.8", "forbidding": "0.5", "gruesome": ".8", "be": "0.0", "irreversible": "0.7", "sarin": "0.7", "directed": "0.1", "salmonella": "0.5", "abduct": ".7", "twister": "0.8", "rampage": ".8", "urgent": ".0.6", "dangerous": ".5", "up": "0.1", "extermination": "0.7", "annihilation": "0.9", "future": "0", "contrary": "0.5", "dirty": "0.4", "afflictive": "0.8", "AlQaeda": ".8", "forlorn": "0.9", "ultimatum": "0.8", "jihadist": ".7", "risky": "0.5", "prospect": "0.3", "breakneck": "1", "settled": "0.1", "contentious": ".7", 
"horrific": "0.8", "deaths": "0.8", "woebegone": "0.6", "certain": "0", "suspicious": "0.5", "unsafe": "0.8", "marijuana": "0.5", "sad": "0.7", "antagonistic": "0.6", "Terror": "0.8", "airstrike": ".8", "at": "0", "exhortatory": "0.6", "anthrax": "0.7", "admonitory": "0.8", "shooting": "0.7", "depressive": ".3", "vulnerable": "0.6", "sullen": "0.3", "dying": "0.7", "eerie": ".6", "deadly": "0.9", "speculative": "0.7", "malicious": "0.6", "offensive": "0.8", "severe": "0.7", "narcotics": "0.6", "Islamic": ".3", "unavailing": "0.6", "useless": "0.5", "tactic": ".1", "pestilential": "0.6", "admonishing": "0.6", "woeful": "0.6", "worsening": "0.7", "doomed": "0.8", "oppressive": ".77", "tornado": "0.8", "cautioning": "0.5", "inescapable": "0.5", "chancy": "0.5", "harmful": ".6", "militants": ".8", "fell": "0.1", "stab": ".8", "infection": "0.6", "significant": "0.7", "termination": "0.6", "prisoner": "0.6", "jihadi": ".7", "odious": "0.7", "Murder": "0.8", "grave": "0.9", "carcinogenic": "0.6", "Intimidation": "0.7", "off-putting": "0.6", "inexorable": "0.4", "fighter": ".8", "bullet": ".7", "infanticide": "0.6", "touchy": "0.6", "upset": "0.6", "exigent": "0.4", "pernicious": ".7", "demoralized": "0.3", "Endemic": "0.8", "nuclear": "0.9", "serious": "0.6", "menace": "0.5", "attacking": ".8", "weighty": "0.6", "standoff": "0.6"} -------------------------------------------------------------------------------- /ThreatDetectionSentimentAnalysis/Neural Network/threatSentiment.json: -------------------------------------------------------------------------------- 1 | {"funereal": ".5", "influenza": "0.5", "bomb": "0.8", "hurricane": "0.8", "rifle": ".6", "precarious": "0.5", "matricide": "0.7", "executing": ".8", "avalanche": "0.7", "malignant": "0.7", "hanging": "0.8", "lethal": ".7", "shot": "0.9", "baneful": "0.4", "Boko": ".8", "gangs": "0.6", "distressing": ".5", "dreadful": ".6", "suicide": ".8", "warlike": ".6", "sororicidal": "0.6", "disgusting": ".7", "crime": "0.7", "abhorrent": "0.8", "to": "0.0", "unpleasant": "0.7", "terror": ".8", "portentous": "0.5", "fatal": "0.9", "spiteful": "0.6", "hope": "0.2", "Malevolent": "0.6", "ordained": "0.2", "risk": "0.6", "torture": "0.8", "hostage": "0.8", "inoculable": "0.7", "massacre": "0.6", "keylogger": "0.6", "impregnable": "0", "predestined": "0.3", "frowning": "0.6", "ebola": "0.7", "gloomy": ".4", "awful": ".", "nasty": "0.7", "frightening": "0.7", "cautionary": "0.7", "homicidal": "0.5", "Demise": "0.6", "sunk": "0.7", "touch-and-go": "0.7", "die": ".9", "dejected": ".7", "gun": ".6", "bad": "0.8", "shots": "0.7", "typhoon": "0.8", "gunman": ".8", "relentless": "0.6", "manslaughter": "0.8", "foreordained": "0.2", "force": ".7", "disaster": "0.8", "cynical": ".5", "gunfight": "0.7", "resentful": ".6", "cyber": "0.6", "portend": "0.6", "past": "0.2", "Daesh": ".9", "irredeemable": "0.8", "desperate": "0.5", "disturbing": ".5", "unmitigable": "0.6", "kidnap": "0.7", "incurable": "0.7", "bacteria": "0.6", "army": ".6", "unrelenting": "0.5", "bomber": ".8", "inauspicious": "0.4", "ruthless": "0.8", "alarming": "0.9", "poisonous": "0.8", "poison": "0.7", "perilous": "0.6", "sour": ".5", "irrevocable": "0.7", "closed": "0", "horrible": ".6", "weird": ".3", "pistol": ".6", "presage": "0.7", "terrorism": ".8", "hairy": "0.6", "scowling": "0.4", "burn": "0.5", "explosion": "0.8", "barbaric": ".8", "slaying": "0.7", "brewing": "0.4", "killing": "1", "rapacious": ".6", "cards": "0", "hazmat": "0.7", "forthcoming": "0.3", "contamination": "0.6", 
"menacing": "0.7", "tremor": "0.7", "come": "0.0", "earthquake": "0.7", "momentous": "0.2", "extreme": "0.5", "trepidation": "0.6", "great": "0.1", "store": "0", "direful": "5.8", "State": ".6", "Commination": "0.6", "phishing": "0.6", "ill": "0.5", "quarrelsome": ".7", "murdered": "0.7", "creepy": ".5", "cessation": "0.6", "ill-fated": "0.6", "strong-arm": ".7", "doleful": ".5", "assault": ".8", "implacable": "0.6", "Death": "0.7", "tough": "0.4", "sulky": "0.3", "pox": "0.5", "gone": "0.2", "foreboding": "0.4", "vain": "0.4", "defiance": "0.6", "epidemic": "0.7", "hamas": "1", "baleful": "0.5", "deleterious": "0.5", "fulmination": "0.6", "loaded": "0", "cocaine": "0.7", "regicide": "0.7", "dangersome": "0.9", "terminal": "0.4", "sealed": "0.2", "fright": "0.6", "eradication": "0.6", "injure": ".8", "Hostage": "0.7", "illegal": "0.7", "condemned": "0.5", "stated": "0.1", "predesigned": "0.3", "unapproachable": "0.7", "detrimental": "0.6", "life-and-death": "0.8", "captive": "0.6", "intrusive": ".4", "fratricide": "0.7", "killed": "0.9", "wicked": "0.5", "dismay": "0.5", "on": "0", "infect": "0.6", "butchery": "0.7", "spammer": "0.6", "missile": ".5", "downhearted": "0.5", "ghoulish": ".7", "dismal": ".3", "disruptive": ".8", "Haram": ".8", "breach": "0.7", "instant": "0.2", "cunt": ".2", "shoot": "0.9", "queasy": "0.4", "H1N1": "0.5", "flu": "0.4", "foreshadowing": "0.6", "will": "0.0", "ugly": "0.7", "near": ".1", "lugubrious": ".6", "irreparable": "0.7", "internecine": "0.5", "venomous": ".7", "sombre": ".8", "imperil": "0.6", "pessimistic": ".3", "extirpation": "0.6", "sinister": "0.6", "atrocious": ".8", "impracticable": "0.6", "viperous": "0.8", "contaminate": "0.6", "is": "0.0", "meant": "0.0", "collision": "0.3", "pressing": "0.2", "in": "0", "crash": "0.3", "rape": ".8", "homicide": "0.8", "malware": "0.6", "terrorist": ".8", "disconsolate": "0.4", "maligned": "0.6", "agony": "0.6", "calamitous": ".8", "wound": ".3", "parlous": "0.7", "mortiferous": "0.5", "injurious": ".7", "chemical": "0.7", "tepidity": "0.6", "hot": "0", "vital": "0.5", "unfortunate": "0.7", "goner": "0.3", "repellent": "0.7", "itchy": ".4", "persuasion": "0", "panic": "0.6", "ball": "0.0", "intended": "0.6", "taliban": "1", "restraint": ".2", "insecure": "0.6", "filicide": "0.8", "vicious": "0.6", "despondent": "0.4", "hand": "0", "bullying": ".6", "abuse": ".4", "delicate": "0", "destined": "0.5", "coming": "0", "fierce": "0.6", "murdering": "0.8", "destructive": ".6", "compulsory": "0", "bleak": ".4", "malevolent": ".8", "disagreeable": "0.7", "bombers": ".8", "shotgun": ".4", "weapon": ".8", "bounces": "0.1", "fighting": ".8", "unachievable": "0.7", "thorny": "0.7", "drone": ".", "Irreconcilable": "0.5", "dread": "0.6", "detainee": "0.4", "furious": ".8", "compelled": "0.1", "premonitory": "0.6", "wind": "0", "rancorous": ".4", "jeopardous": "0.8", "vindictive": "0.6", "rocket": ".8", "peril": "0.7", "radiation": "0.8", "over": "0.7", "shaky": "0.5", "patricide": "0.9", "militiamen": ".5", "course": "0", "glum": "0.4", "kill": ".9", "victim": ".7", "horrify": "0.8", "exemplary": "0.6", "Killing": "0.7", "encroaching": ".6", "impending": "0.5", "bombing": ".9", "pendemic": "0.7", "death": ".9", "consternation": "0.8", "flood": "0.7", "hezbollah": "1", "assassination": "0.8", "rootkit": "0.6", "remorseless": "0.6", "Taliban": ".8", "pestiferous": "0.4", "trojan": "0.6", "consequential": "0.7", "mournful": ".7", "Calamitous": "0.6", "overhanging": "0.5", "monitorial": "0.5", "grievous": "0.7", "sick": "0.3", 
"emergency": "0.5", "looming": "0.7", "intimidation": ".8", "dark": "0.5", "assailing": ".7", "Cruel": "0.7", "predetermined": "0.3", "repulsive": "0.8", "hair-raising": ".8", "Fatal": ".8", "outbreak": "0.7", "execution": "0.8", "grim": "0.6", "serpentine": "0.8", "acute": "0.9", "Misery": "0.6", "lost": "0.6", "tsunami": "0.8", "unstable": "0.2", "averse": ".6", "virulent": "0.6", "security": "0.6", "heavy": "0.5", "dour": "0.6", "liquidation": "0.7", "creek": "0", "exposed": "0.6", "macabre": ".8", "extremism": "1", "treacherous": "0.5", "shuddersome": ".6", "martial": ".3", "hazard": "0.7", "pugnacious": ".77", "dispiriting": ".5", "meth": "0.7", "mephitic": "0.6", "savage": "0.6", "toxic": "0.8", "for": "0", "infectious": "0.6", "lockdown": "0.6", "unavoidable": "0.4", "crucial": "0.8", "discouraging": "0.5", "bellicose": ".8", "adverse": "0.6", "antipathetic": ".7", "critical": "0.9", "despair": "0.6", "evacuation": "0.6", "ruinous": ".8", "IS": ".8", "despairing": "0.3", "browbeating": ".3", "itching": ".4", "combative": ".7", "slaughter": "0.8", "intruding": ".4", "unfriendly": "0.7", "heroin": "0.7", "que": "0.1", "evil": "0.7", "dire": "0.5", "nightmarish": ".7", "obliteration": "0.8", "ominous": "0.5", "inevitable": "0.5", "scary": ".6", "hawkish": ".4", "hostile": "0.7", "constraint": ".4", "violence": "0.5", "terrible": "0.6", "important": "0", "militia": ".5", "hopeless": ".6", "threat": "0.5", "mortal": ".2", "consequence": "0.6", "jeopardise.": "0.6", "glowering": "0.6", "warning": "0.8", "angry": "0.7", "destruction": "0.8", "extinction": "0.7", "enraged": ".7", "major": "0.2", "daunting": "0.7", "unhealthy": "0.7", "burning": "0.5", "monitory": "0.6", "bound": "0.5", "invading": ".6", "brute": "0.6", "down": "0.6", "clamorous": "0.7", "formidable": "0.7", "impossible": "0.6", "pointless": "0.7", "radioactive": "0.8", "fired": "0.7", "ISIS": ".8", "no-win": "0.8", "fated": "0.4", "noxious": "0.6", "nocuous": "0.7", "threatening": "0.7", "ricin": "0.7", "H5N1": "0.5", "uxoricide": "0.7", "militant": ".8", "ineluctable": "0.2", "attack": "0.8", "tragic": "0.9", "terrifying": ".8", "storm": "0.8", "ticklish": "0", "dynamite": "0.7", "ISIL": ".9", "war": ".7", "duress": "..8", "futile": ".5", "virus": "0.6", "murder": "0.9", "way": "0.0", "that": "0.0", "hazardous": "0.7", "hurtful": ".6", "dreary": ".6", "depressing": ".3", "sera": ".5", "plague": "0.7", "helpless": "0.5", "troubling": "0.8", "forbidding": "0.5", "gruesome": ".8", "be": "0.0", "irreversible": "0.7", "sarin": "0.7", "directed": "0.1", "salmonella": "0.5", "abduct": ".7", "twister": "0.8", "rampage": ".8", "urgent": ".0.6", "dangerous": ".5", "up": "0.1", "extermination": "0.7", "annihilation": "0.9", "future": "0", "contrary": "0.5", "dirty": "0.4", "afflictive": "0.8", "AlQaeda": ".8", "forlorn": "0.9", "ultimatum": "0.8", "jihadist": ".7", "risky": "0.5", "prospect": "0.3", "breakneck": "1", "settled": "0.1", "contentious": ".7", "horrific": "0.8", "deaths": "0.8", "woebegone": "0.6", "certain": "0", "suspicious": "0.5", "unsafe": "0.8", "marijuana": "0.5", "sad": "0.7", "antagonistic": "0.6", "Terror": "0.8", "airstrike": ".8", "at": "0", "exhortatory": "0.6", "anthrax": "0.7", "admonitory": "0.8", "shooting": "0.7", "depressive": ".3", "vulnerable": "0.6", "sullen": "0.3", "dying": "0.7", "eerie": ".6", "deadly": "0.9", "speculative": "0.7", "malicious": "0.6", "offensive": "0.8", "severe": "0.7", "narcotics": "0.6", "Islamic": ".3", "unavailing": "0.6", "useless": "0.5", "tactic": ".1", "pestilential": 
"0.6", "admonishing": "0.6", "woeful": "0.6", "worsening": "0.7", "doomed": "0.8", "oppressive": ".77", "tornado": "0.8", "cautioning": "0.5", "inescapable": "0.5", "chancy": "0.5", "harmful": ".6", "militants": ".8", "fell": "0.1", "stab": ".8", "infection": "0.6", "significant": "0.7", "termination": "0.6", "prisoner": "0.6", "jihadi": ".7", "odious": "0.7", "Murder": "0.8", "grave": "0.9", "carcinogenic": "0.6", "Intimidation": "0.7", "off-putting": "0.6", "inexorable": "0.4", "fighter": ".8", "bullet": ".7", "infanticide": "0.6", "touchy": "0.6", "upset": "0.6", "exigent": "0.4", "pernicious": ".7", "demoralized": "0.3", "Endemic": "0.8", "nuclear": "0.9", "serious": "0.6", "menace": "0.5", "attacking": ".8", "weighty": "0.6", "standoff": "0.6"} -------------------------------------------------------------------------------- /Dicts/threatWords.yml: -------------------------------------------------------------------------------- 1 | abduct: [threat] 2 | abhorrent: [threat] 3 | abuse: [threat] 4 | acute: [threat] 5 | afflictive: [threat] 6 | agony: [threat] 7 | airstrike: [threat] 8 | alarming: [threat] 9 | AlQaeda: [threat] 10 | angry: [threat] 11 | annihilation: [threat] 12 | antipathetic: [threat] 13 | army: [threat] 14 | assailing: [threat] 15 | assassination: [threat] 16 | assault: [threat] 17 | at hand: [threat] 18 | atrocious: [threat] 19 | attacking: [threat] 20 | averse. Fatal: [threat] 21 | awful: [threat] 22 | bad: [threat] 23 | baleful: [threat] 24 | baneful: [threat] 25 | barbaric: [threat] 26 | bellicose: [threat] 27 | beyond recall: [threat] 28 | bleak: [threat] 29 | boko: [threat] 30 | bomb: [threat] 31 | bomber: [threat] 32 | bombing: [threat] 33 | bound for: [threat] 34 | breakneck: [threat] 35 | brewing: [threat] 36 | browbeating: [threat] 37 | bullet: [threat] 38 | bullying: [threat] 39 | burning: [threat] 40 | butchery: [threat] 41 | Calamitous: [threat] 42 | calamitous: [threat] 43 | captive: [threat] 44 | carcinogenic: [threat] 45 | certain: [threat] 46 | cessation: [threat] 47 | chancy: [threat] 48 | clamorous: [threat] 49 | closed: [threat] 50 | combative: [threat] 51 | coming: [threat] 52 | Commination: [threat] 53 | compelled: [threat] 54 | compulsory: [threat] 55 | condemned: [threat] 56 | consequential: [threat] 57 | consternation: [threat] 58 | constraint: [threat] 59 | contentious: [threat] 60 | creepy: [threat] 61 | critical: [threat] 62 | crucial: [threat] 63 | Cruel: [threat] 64 | cunt: [threat] 65 | cynical: [threat] 66 | Daesh: [threat] 67 | dangerous: [threat] 68 | dangersome: [threat] 69 | dark: [threat] 70 | daunting: [threat] 71 | deadly: [threat] 72 | death: [threat] 73 | Death: [threat] 74 | defiance: [threat] 75 | dejected: [threat] 76 | deleterious: [threat] 77 | delicate: [threat] 78 | demoralized: [threat] 79 | depressing: [threat] 80 | depressive: [threat] 81 | designed: [threat] 82 | despairing: [threat] 83 | desperate: [threat] 84 | despondent: [threat] 85 | destined: [threat] 86 | destruction: [threat] 87 | destructive: [threat] 88 | detainee: [threat] 89 | detrimental: [threat] 90 | die: [threat] 91 | dire: [threat] 92 | directed: [threat] 93 | direful: [threat] 94 | disagreeable: [threat] 95 | disaster: [threat] 96 | disconsolate: [threat] 97 | discouraging: [threat] 98 | disgusting: [threat] 99 | dismal: [threat] 100 | dismay: [threat] 101 | dispiriting: [threat] 102 | disruptive: [threat] 103 | distressing: [threat] 104 | disturbing: [threat] 105 | doleful: [threat] 106 | doomed: [threat] 107 | dour: [threat] 108 | downhearted: 
[threat] 109 | dread: [threat] 110 | dreadful: [threat] 111 | dreary: [threat] 112 | drone: [threat] 113 | duress: [threat] 114 | dying: [threat] 115 | dynamite: [threat] 116 | eerie: [threat] 117 | encroaching: [threat] 118 | Endemic: [threat] 119 | enraged: [threat] 120 | eradication: [threat] 121 | evil: [threat] 122 | executing: [threat] 123 | execution: [threat] 124 | exigent: [threat] 125 | exposed: [threat] 126 | extermination: [threat] 127 | extinction: [threat] 128 | extirpation: [threat] 129 | extreme: [threat] 130 | fatal: [threat] 131 | fated in near future: [threat] 132 | fell: [threat] 133 | fierce: [threat] 134 | fighter: [threat] 135 | fighting: [threat] 136 | filicide: [threat] 137 | fired: [threat] 138 | forbidding: [threat] 139 | force: [threat] 140 | foreboding: [threat] 141 | foreordained: [threat] 142 | foreshadowing: [threat] 143 | forlorn: [threat] 144 | formidable: [threat] 145 | forthcoming: [threat] 146 | fratricide: [threat] 147 | fright. Hostage: [threat] 148 | frightening: [threat] 149 | frowning: [threat] 150 | fulmination: [threat] 151 | funereal: [threat] 152 | furious: [threat] 153 | futile: [threat] 154 | ghoulish: [threat] 155 | gloomy: [threat] 156 | glowering: [threat] 157 | glum: [threat] 158 | gone: [threat] 159 | goner: [threat] 160 | grave: [threat] 161 | grievous: [threat] 162 | grim: [threat] 163 | gruesome: [threat] 164 | gun: [threat] 165 | gunman: [threat] 166 | hair-raising: [threat] 167 | hairy: [threat] 168 | hanging over: [threat] 169 | Haram: [threat] 170 | haram: [threat] 171 | isis: [threat] 172 | harmful: [threat] 173 | hawkish: [threat] 174 | hazard: [threat] 175 | hazardous: [threat] 176 | heavy: [threat] 177 | helpless: [threat] 178 | homicidal: [threat] 179 | homicide: [threat] 180 | hopeless: [threat] 181 | horrible: [threat] 182 | hostile: [threat] 183 | hot: [threat] 184 | hurtful: [threat] 185 | ill-fated: [threat] 186 | impending: [threat] 187 | imperil: [threat] 188 | implacable: [threat] 189 | important: [threat] 190 | impossible: [threat] 191 | impracticable: [threat] 192 | impregnable: [threat] 193 | in despair: [threat] 194 | in prospect: [threat] 195 | in store: [threat] 196 | in the cards: [threat] 197 | in the wind: [threat] 198 | incurable: [threat] 199 | ineluctable: [threat] 200 | inescapable: [threat] 201 | inevitable: [threat] 202 | inexorable: [threat] 203 | infanticide: [threat] 204 | infectious: [threat] 205 | injure: [threat] 206 | injurious: [threat] 207 | inoculable: [threat] 208 | insecure: [threat] 209 | instant: [threat] 210 | intended: [threat] 211 | internecine: [threat] 212 | Intimidation: [threat] 213 | intimidation: [threat] 214 | intruding: [threat] 215 | intrusive: [threat] 216 | invading: [threat] 217 | Irreconcilable: [threat] 218 | irredeemable: [threat] 219 | irreparable: [threat] 220 | irreversible: [threat] 221 | irrevocable: [threat] 222 | IS: [threat] 223 | ISIL: [threat] 224 | ISIS: [threat] 225 | Islamic State: [threat] 226 | itching: [threat] 227 | itchy: [threat] 228 | jeopardise. 
Demise: [threat] 229 | jeopardous: [threat] 230 | jihadi: [threat] 231 | jihadist: [threat] 232 | kill: [threat] 233 | killing: [threat] 234 | Killing: [threat] 235 | lethal: [threat] 236 | life-and-death: [threat] 237 | liquidation: [threat] 238 | loaded: [threat] 239 | looming: [threat] 240 | lost: [threat] 241 | lugubrious: [threat] 242 | macabre: [threat] 243 | major: [threat] 244 | Malevolent: [threat] 245 | malevolent: [threat] 246 | malicious: [threat] 247 | malignant: [threat] 248 | maligned: [threat] 249 | manslaughter: [threat] 250 | martial: [threat] 251 | massacre: [threat] 252 | matricide: [threat] 253 | meant: [threat] 254 | menace: [threat] 255 | menacing: [threat] 256 | mephitic: [threat] 257 | militant: [threat] 258 | militants: [threat] 259 | militia: [threat] 260 | militiamen: [threat] 261 | missile: [threat] 262 | momentous: [threat] 263 | mortal: [threat] 264 | mortiferous: [threat] 265 | mournful: [threat] 266 | Murder: [threat] 267 | nasty: [threat] 268 | near: [threat] 269 | nightmarish: [threat] 270 | no-win: [threat] 271 | nocuous: [threat] 272 | noxious: [threat] 273 | nuclear missile: [threat] 274 | obliteration: [threat] 275 | odious: [threat] 276 | of great consequence: [threat] 277 | off-putting: [threat] 278 | offensive: [threat] 279 | ominous: [threat] 280 | on collision course: [threat] 281 | oppressive: [threat] 282 | ordained: [threat] 283 | overhanging: [threat] 284 | panic: [threat] 285 | parlous: [threat] 286 | past hope: [threat] 287 | patricide: [threat] 288 | peril: [threat] 289 | perilous: [threat] 290 | pernicious: [threat] 291 | persuasion: [threat] 292 | pessimistic: [threat] 293 | pestiferous: [threat] 294 | pestilential: [threat] 295 | pistol: [threat] 296 | pointless: [threat] 297 | poisonous: [threat] 298 | portend: [threat] 299 | portentous: [threat] 300 | precarious: [threat] 301 | predesigned: [threat] 302 | predestined: [threat] 303 | predetermined: [threat] 304 | presage: [threat] 305 | pressing: [threat] 306 | prisoner: [threat] 307 | pugnacious: [threat] 308 | quarrelsome: [threat] 309 | que sera sera: [threat] 310 | queasy: [threat] 311 | rampage: [threat] 312 | rancorous: [threat] 313 | rapacious: [threat] 314 | rape: [threat] 315 | regicide: [threat] 316 | relentless: [threat] 317 | remorseless: [threat] 318 | repellent: [threat] 319 | repulsive: [threat] 320 | resentful: [threat] 321 | restraint: [threat] 322 | rifle: [threat] 323 | risk: [threat] 324 | risky: [threat] 325 | rocket: [threat] 326 | ruinous: [threat] 327 | ruthless: [threat] 328 | sad: [threat] 329 | savage: [threat] 330 | scary: [threat] 331 | scowling: [threat] 332 | sealed: [threat] 333 | serious: [threat] 334 | serpentine: [threat] 335 | settled: [threat] 336 | severe: [threat] 337 | shaky: [threat] 338 | shoot: [threat] 339 | shot: [threat] 340 | shot down: [threat] 341 | shotgun: [threat] 342 | shuddersome: [threat] 343 | significant: [threat] 344 | sinister: [threat] 345 | slaughter: [threat] 346 | slaying. 
Misery: [threat] 347 | sombre: [threat] 348 | sororicidal: [threat] 349 | sour: [threat] 350 | speculative: [threat] 351 | spiteful: [threat] 352 | stab: [threat] 353 | stated: [threat] 354 | strong-arm tactic: [threat] 355 | suicide bombers: [threat] 356 | sulky: [threat] 357 | sullen: [threat] 358 | sunk: [threat] 359 | tepidity: [threat] 360 | terminal: [threat] 361 | termination: [threat] 362 | terrible: [threat] 363 | terrifying: [threat] 364 | Terror: [threat] 365 | terror: [threat] 366 | terrorism: [threat] 367 | terrorist: [threat] 368 | that is to be: [threat] 369 | that will be: [threat] 370 | thorny: [threat] 371 | threat: [threat] 372 | threatening: [threat] 373 | ticklish: [threat] 374 | to come: [threat] 375 | torture: [threat] 376 | touch-and-go: [threat] 377 | touchy: [threat] 378 | tough: [threat] 379 | toxic: [threat] 380 | tragic: [threat] 381 | treacherous: [threat] 382 | trepidation: [threat] 383 | troubling: [threat] 384 | ugly: [threat] 385 | ultimatum: [threat] 386 | unachievable: [threat] 387 | unapproachable: [threat] 388 | unavailing: [threat] 389 | unavoidable: [threat] 390 | unfortunate: [threat] 391 | unfriendly: [threat] 392 | unhealthy: [threat] 393 | unmitigable: [threat] 394 | unpleasant: [threat] 395 | unrelenting: [threat] 396 | unsafe: [threat] 397 | unstable: [threat] 398 | up the creek: [threat] 399 | upset: [threat] 400 | urgent: [threat] 401 | useless: [threat] 402 | uxoricide: [threat] 403 | vain: [threat] 404 | venomous: [threat] 405 | vicious: [threat] 406 | victim: [threat] 407 | vindictive: [threat] 408 | violence: [threat] 409 | viperous: [threat] 410 | virulent: [threat] 411 | vital: [threat] 412 | vulnerable: [threat] 413 | war: [threat] 414 | warlike: [threat] 415 | warning: [threat] 416 | weapon: [threat] 417 | weighty: [threat] 418 | weird: [threat] 419 | wicked: [threat] 420 | woebegone: [threat] 421 | woeful: [threat] 422 | worsening: [threat] 423 | wound: [threat] 424 | decapitate: [threat] 425 | harassment: [threat] 426 | attack: [threat] 427 | attacks: [threat] 428 | attacking: [threat] 429 | attacked: [threat] 430 | atrocity: [threat] 431 | atrocities: [threat] 432 | warns: [threat] 433 | militants: [threat] 434 | suffer: [threat] 435 | warn: [threat] 436 | horrific: [threat] 437 | outrage: [threat] -------------------------------------------------------------------------------- /ThreatDetectionSentimentAnalysis/Dicts/threatWords.yml: -------------------------------------------------------------------------------- 1 | abduct: [threat] 2 | abhorrent: [threat] 3 | abuse: [threat] 4 | acute: [threat] 5 | afflictive: [threat] 6 | agony: [threat] 7 | airstrike: [threat] 8 | alarming: [threat] 9 | AlQaeda: [threat] 10 | angry: [threat] 11 | annihilation: [threat] 12 | antipathetic: [threat] 13 | army: [threat] 14 | assailing: [threat] 15 | assassination: [threat] 16 | assault: [threat] 17 | at hand: [threat] 18 | atrocious: [threat] 19 | attacking: [threat] 20 | averse. 
Fatal: [threat] 21 | awful: [threat] 22 | bad: [threat] 23 | baleful: [threat] 24 | baneful: [threat] 25 | barbaric: [threat] 26 | bellicose: [threat] 27 | beyond recall: [threat] 28 | bleak: [threat] 29 | boko: [threat] 30 | bomb: [threat] 31 | bomber: [threat] 32 | bombing: [threat] 33 | bound for: [threat] 34 | breakneck: [threat] 35 | brewing: [threat] 36 | browbeating: [threat] 37 | bullet: [threat] 38 | bullying: [threat] 39 | burning: [threat] 40 | butchery: [threat] 41 | Calamitous: [threat] 42 | calamitous: [threat] 43 | captive: [threat] 44 | carcinogenic: [threat] 45 | certain: [threat] 46 | cessation: [threat] 47 | chancy: [threat] 48 | clamorous: [threat] 49 | closed: [threat] 50 | combative: [threat] 51 | coming: [threat] 52 | Commination: [threat] 53 | compelled: [threat] 54 | compulsory: [threat] 55 | condemned: [threat] 56 | consequential: [threat] 57 | consternation: [threat] 58 | constraint: [threat] 59 | contentious: [threat] 60 | creepy: [threat] 61 | critical: [threat] 62 | crucial: [threat] 63 | Cruel: [threat] 64 | cunt: [threat] 65 | cynical: [threat] 66 | Daesh: [threat] 67 | dangerous: [threat] 68 | dangersome: [threat] 69 | dark: [threat] 70 | daunting: [threat] 71 | deadly: [threat] 72 | death: [threat] 73 | Death: [threat] 74 | defiance: [threat] 75 | dejected: [threat] 76 | deleterious: [threat] 77 | delicate: [threat] 78 | demoralized: [threat] 79 | depressing: [threat] 80 | depressive: [threat] 81 | designed: [threat] 82 | despairing: [threat] 83 | desperate: [threat] 84 | despondent: [threat] 85 | destined: [threat] 86 | destruction: [threat] 87 | destructive: [threat] 88 | detainee: [threat] 89 | detrimental: [threat] 90 | die: [threat] 91 | dire: [threat] 92 | directed: [threat] 93 | direful: [threat] 94 | disagreeable: [threat] 95 | disaster: [threat] 96 | disconsolate: [threat] 97 | discouraging: [threat] 98 | disgusting: [threat] 99 | dismal: [threat] 100 | dismay: [threat] 101 | dispiriting: [threat] 102 | disruptive: [threat] 103 | distressing: [threat] 104 | disturbing: [threat] 105 | doleful: [threat] 106 | doomed: [threat] 107 | dour: [threat] 108 | downhearted: [threat] 109 | dread: [threat] 110 | dreadful: [threat] 111 | dreary: [threat] 112 | drone: [threat] 113 | duress: [threat] 114 | dying: [threat] 115 | dynamite: [threat] 116 | eerie: [threat] 117 | encroaching: [threat] 118 | Endemic: [threat] 119 | enraged: [threat] 120 | eradication: [threat] 121 | evil: [threat] 122 | executing: [threat] 123 | execution: [threat] 124 | exigent: [threat] 125 | exposed: [threat] 126 | extermination: [threat] 127 | extinction: [threat] 128 | extirpation: [threat] 129 | extreme: [threat] 130 | fatal: [threat] 131 | fated in near future: [threat] 132 | fell: [threat] 133 | fierce: [threat] 134 | fighter: [threat] 135 | fighting: [threat] 136 | filicide: [threat] 137 | fired: [threat] 138 | forbidding: [threat] 139 | force: [threat] 140 | foreboding: [threat] 141 | foreordained: [threat] 142 | foreshadowing: [threat] 143 | forlorn: [threat] 144 | formidable: [threat] 145 | forthcoming: [threat] 146 | fratricide: [threat] 147 | fright. 
Hostage: [threat] 148 | frightening: [threat] 149 | frowning: [threat] 150 | fulmination: [threat] 151 | funereal: [threat] 152 | furious: [threat] 153 | futile: [threat] 154 | ghoulish: [threat] 155 | gloomy: [threat] 156 | glowering: [threat] 157 | glum: [threat] 158 | gone: [threat] 159 | goner: [threat] 160 | grave: [threat] 161 | grievous: [threat] 162 | grim: [threat] 163 | gruesome: [threat] 164 | gun: [threat] 165 | gunman: [threat] 166 | hair-raising: [threat] 167 | hairy: [threat] 168 | hanging over: [threat] 169 | Haram: [threat] 170 | haram: [threat] 171 | isis: [threat] 172 | harmful: [threat] 173 | hawkish: [threat] 174 | hazard: [threat] 175 | hazardous: [threat] 176 | heavy: [threat] 177 | helpless: [threat] 178 | homicidal: [threat] 179 | homicide: [threat] 180 | hopeless: [threat] 181 | horrible: [threat] 182 | hostile: [threat] 183 | hot: [threat] 184 | hurtful: [threat] 185 | ill-fated: [threat] 186 | impending: [threat] 187 | imperil: [threat] 188 | implacable: [threat] 189 | important: [threat] 190 | impossible: [threat] 191 | impracticable: [threat] 192 | impregnable: [threat] 193 | in despair: [threat] 194 | in prospect: [threat] 195 | in store: [threat] 196 | in the cards: [threat] 197 | in the wind: [threat] 198 | incurable: [threat] 199 | ineluctable: [threat] 200 | inescapable: [threat] 201 | inevitable: [threat] 202 | inexorable: [threat] 203 | infanticide: [threat] 204 | infectious: [threat] 205 | injure: [threat] 206 | injurious: [threat] 207 | inoculable: [threat] 208 | insecure: [threat] 209 | instant: [threat] 210 | intended: [threat] 211 | internecine: [threat] 212 | Intimidation: [threat] 213 | intimidation: [threat] 214 | intruding: [threat] 215 | intrusive: [threat] 216 | invading: [threat] 217 | Irreconcilable: [threat] 218 | irredeemable: [threat] 219 | irreparable: [threat] 220 | irreversible: [threat] 221 | irrevocable: [threat] 222 | IS: [threat] 223 | ISIL: [threat] 224 | ISIS: [threat] 225 | Islamic State: [threat] 226 | itching: [threat] 227 | itchy: [threat] 228 | jeopardise. 
Demise: [threat] 229 | jeopardous: [threat] 230 | jihadi: [threat] 231 | jihadist: [threat] 232 | kill: [threat] 233 | killing: [threat] 234 | Killing: [threat] 235 | lethal: [threat] 236 | life-and-death: [threat] 237 | liquidation: [threat] 238 | loaded: [threat] 239 | looming: [threat] 240 | lost: [threat] 241 | lugubrious: [threat] 242 | macabre: [threat] 243 | major: [threat] 244 | Malevolent: [threat] 245 | malevolent: [threat] 246 | malicious: [threat] 247 | malignant: [threat] 248 | maligned: [threat] 249 | manslaughter: [threat] 250 | martial: [threat] 251 | massacre: [threat] 252 | matricide: [threat] 253 | meant: [threat] 254 | menace: [threat] 255 | menacing: [threat] 256 | mephitic: [threat] 257 | militant: [threat] 258 | militants: [threat] 259 | militia: [threat] 260 | militiamen: [threat] 261 | missile: [threat] 262 | momentous: [threat] 263 | mortal: [threat] 264 | mortiferous: [threat] 265 | mournful: [threat] 266 | Murder: [threat] 267 | nasty: [threat] 268 | near: [threat] 269 | nightmarish: [threat] 270 | no-win: [threat] 271 | nocuous: [threat] 272 | noxious: [threat] 273 | nuclear missile: [threat] 274 | obliteration: [threat] 275 | odious: [threat] 276 | of great consequence: [threat] 277 | off-putting: [threat] 278 | offensive: [threat] 279 | ominous: [threat] 280 | on collision course: [threat] 281 | oppressive: [threat] 282 | ordained: [threat] 283 | overhanging: [threat] 284 | panic: [threat] 285 | parlous: [threat] 286 | past hope: [threat] 287 | patricide: [threat] 288 | peril: [threat] 289 | perilous: [threat] 290 | pernicious: [threat] 291 | persuasion: [threat] 292 | pessimistic: [threat] 293 | pestiferous: [threat] 294 | pestilential: [threat] 295 | pistol: [threat] 296 | pointless: [threat] 297 | poisonous: [threat] 298 | portend: [threat] 299 | portentous: [threat] 300 | precarious: [threat] 301 | predesigned: [threat] 302 | predestined: [threat] 303 | predetermined: [threat] 304 | presage: [threat] 305 | pressing: [threat] 306 | prisoner: [threat] 307 | pugnacious: [threat] 308 | quarrelsome: [threat] 309 | que sera sera: [threat] 310 | queasy: [threat] 311 | rampage: [threat] 312 | rancorous: [threat] 313 | rapacious: [threat] 314 | rape: [threat] 315 | regicide: [threat] 316 | relentless: [threat] 317 | remorseless: [threat] 318 | repellent: [threat] 319 | repulsive: [threat] 320 | resentful: [threat] 321 | restraint: [threat] 322 | rifle: [threat] 323 | risk: [threat] 324 | risky: [threat] 325 | rocket: [threat] 326 | ruinous: [threat] 327 | ruthless: [threat] 328 | sad: [threat] 329 | savage: [threat] 330 | scary: [threat] 331 | scowling: [threat] 332 | sealed: [threat] 333 | serious: [threat] 334 | serpentine: [threat] 335 | settled: [threat] 336 | severe: [threat] 337 | shaky: [threat] 338 | shoot: [threat] 339 | shot: [threat] 340 | shot down: [threat] 341 | shotgun: [threat] 342 | shuddersome: [threat] 343 | significant: [threat] 344 | sinister: [threat] 345 | slaughter: [threat] 346 | slaying. 
Misery: [threat] 347 | sombre: [threat] 348 | sororicidal: [threat] 349 | sour: [threat] 350 | speculative: [threat] 351 | spiteful: [threat] 352 | stab: [threat] 353 | stated: [threat] 354 | strong-arm tactic: [threat] 355 | suicide bombers: [threat] 356 | sulky: [threat] 357 | sullen: [threat] 358 | sunk: [threat] 359 | tepidity: [threat] 360 | terminal: [threat] 361 | termination: [threat] 362 | terrible: [threat] 363 | terrifying: [threat] 364 | Terror: [threat] 365 | terror: [threat] 366 | terrorism: [threat] 367 | terrorist: [threat] 368 | that is to be: [threat] 369 | that will be: [threat] 370 | thorny: [threat] 371 | threat: [threat] 372 | threatening: [threat] 373 | ticklish: [threat] 374 | to come: [threat] 375 | torture: [threat] 376 | touch-and-go: [threat] 377 | touchy: [threat] 378 | tough: [threat] 379 | toxic: [threat] 380 | tragic: [threat] 381 | treacherous: [threat] 382 | trepidation: [threat] 383 | troubling: [threat] 384 | ugly: [threat] 385 | ultimatum: [threat] 386 | unachievable: [threat] 387 | unapproachable: [threat] 388 | unavailing: [threat] 389 | unavoidable: [threat] 390 | unfortunate: [threat] 391 | unfriendly: [threat] 392 | unhealthy: [threat] 393 | unmitigable: [threat] 394 | unpleasant: [threat] 395 | unrelenting: [threat] 396 | unsafe: [threat] 397 | unstable: [threat] 398 | up the creek: [threat] 399 | upset: [threat] 400 | urgent: [threat] 401 | useless: [threat] 402 | uxoricide: [threat] 403 | vain: [threat] 404 | venomous: [threat] 405 | vicious: [threat] 406 | victim: [threat] 407 | vindictive: [threat] 408 | violence: [threat] 409 | viperous: [threat] 410 | virulent: [threat] 411 | vital: [threat] 412 | vulnerable: [threat] 413 | war: [threat] 414 | warlike: [threat] 415 | warning: [threat] 416 | weapon: [threat] 417 | weighty: [threat] 418 | weird: [threat] 419 | wicked: [threat] 420 | woebegone: [threat] 421 | woeful: [threat] 422 | worsening: [threat] 423 | wound: [threat] 424 | decapitate: [threat] 425 | harassment: [threat] 426 | attack: [threat] 427 | attacks: [threat] 428 | attacking: [threat] 429 | attacked: [threat] 430 | atrocity: [threat] 431 | atrocities: [threat] 432 | warns: [threat] 433 | militants: [threat] 434 | suffer: [threat] 435 | warn: [threat] 436 | horrific: [threat] 437 | outrage: [threat] -------------------------------------------------------------------------------- /op.py: -------------------------------------------------------------------------------- 1 | from pprint import pprint 2 | import json 3 | import csv 4 | import numpy as np 5 | from pprint import pprint 6 | import nltk 7 | import yaml 8 | import sys 9 | import os 10 | import re 11 | 12 | class Splitter(object): 13 | 14 | def __init__(self): 15 | self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle') 16 | self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer() 17 | 18 | def split(self, text): 19 | sentences = self.nltk_splitter.tokenize(text) 20 | tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences] 21 | return tokenized_sentences 22 | 23 | 24 | class POSTagger(object): 25 | 26 | def __init__(self): 27 | pass 28 | 29 | def pos_tag(self, sentences): 30 | pos = [nltk.pos_tag(sentence) for sentence in sentences] 31 | #adapt format 32 | pos = [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos] 33 | return pos 34 | 35 | class DictionaryTagger(object): 36 | 37 | def __init__(self, dictionary_paths): 38 | files = [open(path, 'r') for path in dictionary_paths] 39 | dictionaries = 
[yaml.safe_load(dict_file) for dict_file in files] #safe_load parses plain YAML mappings without executing arbitrary tags 40 | for dict_file in files: dict_file.close() #close the files explicitly (a bare map() is lazy on Python 3) 41 | self.dictionary = {} 42 | self.max_key_size = 0 43 | for curr_dict in dictionaries: 44 | for key in curr_dict: 45 | if key in self.dictionary: 46 | self.dictionary[key].extend(curr_dict[key]) 47 | else: 48 | self.dictionary[key] = curr_dict[key] 49 | self.max_key_size = max(self.max_key_size, len(key)) 50 | 51 | def tag(self, postagged_sentences): 52 | return [self.tag_sentence(sentence) for sentence in postagged_sentences] 53 | 54 | def tag_sentence(self, sentence, tag_with_lemmas=False): 55 | tag_sentence = [] 56 | N = len(sentence) 57 | if self.max_key_size == 0: 58 | self.max_key_size = N 59 | i = 0 60 | while (i < N): 61 | j = min(i + self.max_key_size, N) #avoid overflow 62 | tagged = False 63 | while (j > i): 64 | expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower() 65 | expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower() 66 | if tag_with_lemmas: 67 | literal = expression_lemma 68 | else: 69 | literal = expression_form 70 | if literal in self.dictionary: 71 | #self.logger.debug("found: %s" % literal) 72 | is_single_token = j - i == 1 73 | original_position = i 74 | i = j 75 | taggings = [tag for tag in self.dictionary[literal]] 76 | tagged_expression = (expression_form, expression_lemma, taggings) 77 | if is_single_token: #if the tagged literal is a single token, conserve its previous taggings: 78 | original_token_tagging = sentence[original_position][2] 79 | tagged_expression[2].extend(original_token_tagging) 80 | tag_sentence.append(tagged_expression) 81 | tagged = True 82 | else: 83 | j = j - 1 84 | if not tagged: 85 | tag_sentence.append(sentence[i]) 86 | i += 1 87 | return tag_sentence 88 | 89 | def value_of(sentiment): #inverted convention: 'positive' lowers the score, 'negative' raises it 90 | if sentiment == 'positive': return -1 91 | if sentiment == 'negative': return 1 92 | return 0 93 | 94 | def value_of_threat(sentiment): #like value_of, but 'threat' tags also raise the score 95 | if sentiment == 'positive': return -1 96 | if sentiment == 'negative': return 1 97 | if sentiment == 'threat': return 1 98 | return 0 99 | 100 | def sentence_score(sentence_tokens, previous_token, acum_score): 101 | if not sentence_tokens: 102 | return acum_score 103 | else: 104 | current_token = sentence_tokens[0] 105 | tags = current_token[2] 106 | token_score = sum([value_of(tag) for tag in tags]) 107 | if previous_token is not None: 108 | previous_tags = previous_token[2] 109 | if 'inc' in previous_tags: 110 | token_score *= 2.0 111 | elif 'dec' in previous_tags: 112 | token_score /= 2.0 113 | elif 'inv' in previous_tags: 114 | token_score *= -1.0 115 | return sentence_score(sentence_tokens[1:], current_token, acum_score + token_score) 116 | 117 | def threatening_score(sentence_tokens, previous_token, acum_score): 118 | if not sentence_tokens: 119 | return acum_score 120 | else: 121 | current_token = sentence_tokens[0] 122 | tags = current_token[2] 123 | token_score = sum([value_of_threat(tag) for tag in tags]) 124 | if previous_token is not None: 125 | previous_tags = previous_token[2] 126 | if 'inc' in previous_tags: 127 | token_score *= 2.0 128 | elif 'dec' in previous_tags: 129 | token_score /= 2.0 130 | elif 'inv' in previous_tags: 131 | token_score *= -1.0 132 | return threatening_score(sentence_tokens[1:], current_token, acum_score + token_score) 133 | 134 | def threat_score(review): 135 | return sum([threatening_score(sentence, None, 0.0) for sentence in review]) 136 | def sentiment_score(review): 137 | return sum([sentence_score(sentence, None, 0.0) for sentence in review]) 138 |
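# A worked example of the scoring above (a sketch for illustration; the sentence is
# hypothetical, but the tags come from the shipped Dicts/*.yml files, where 'very'
# carries [inc] and both 'dangerous' and 'attack' carry [threat]):
#
#   threatening_score() on the tagged sentence "very dangerous attack" evaluates
#     'very'      -> 0.0            (modifier tags such as inc/dec/inv score 0 themselves)
#     'dangerous' -> 1 * 2.0 = 2.0  (doubled because the previous token carries [inc])
#     'attack'    -> 1.0            (previous token has no modifier tag)
#   so threat_score() returns 3.0 for the whole text, while sentence_score() returns
#   0.0 here because no token carries a 'positive' or 'negative' tag.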
139 | 140 | class BigramTrigram(object): 141 | 142 | """ 143 | This method searches for each bigram and trigram in the dictionaries and counts threat, positive and negative matches 144 | :return: ThreatCount, PositiveCount, NegativeCount 145 | """ 146 | def countThreat(self, sentence): 147 | dictTag = ['Dicts/threatWords.yml', 'Dicts/Positive.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml'] 148 | files = [open(path, 'r') for path in dictTag] 149 | dictionaries = [yaml.safe_load(dict_file) for dict_file in files] 150 | for dict_file in files: dict_file.close() 151 | tokens = nltk.word_tokenize(sentence) 152 | #print(tokens) 153 | bigrams = [" ".join(pair) for pair in nltk.bigrams(tokens)] 154 | #print(bigrams) 155 | trigrams = [" ".join(trio) for trio in nltk.trigrams(tokens)] 156 | #print(trigrams) 157 | 158 | bigramThreatCount = 0 159 | bigramPositiveCount = 0 160 | bigramNegativeCount = 0 161 | 162 | trioThreatCount = 0 163 | trioPositiveCount = 0 164 | trioNegativeCount = 0 165 | 166 | for bigram in bigrams: 167 | for dictionary in dictionaries: 168 | if bigram in dictionary and dictionary[bigram] == ['threat']: 169 | bigramThreatCount += 1 170 | if bigram in dictionary and dictionary[bigram] == ['positive']: 171 | bigramPositiveCount += 1 172 | if bigram in dictionary and dictionary[bigram] == ['negative']: 173 | bigramNegativeCount += 1 174 | 175 | for trigram in trigrams: 176 | for dictionary in dictionaries: 177 | if trigram in dictionary and dictionary[trigram] == ['threat']: 178 | trioThreatCount += 1 179 | if trigram in dictionary and dictionary[trigram] == ['positive']: 180 | trioPositiveCount += 1 181 | if trigram in dictionary and dictionary[trigram] == ['negative']: 182 | trioNegativeCount += 1 183 | 184 | threatCount = bigramThreatCount + trioThreatCount 185 | positiveCount = bigramPositiveCount + trioPositiveCount 186 | negativeCount = bigramNegativeCount + trioNegativeCount 187 | return threatCount, positiveCount, negativeCount 188 | 189 | 190 | 191 | 192 | 193 | if __name__ == "__main__": 194 | 195 | with open('testinputdata.txt', 'w') as f: 196 | with open("testdata.txt") as file: 197 | reader = csv.reader(file) 198 | for row in reader: 199 | #print(row) 200 | text = ''.join(row) 201 | new_text = text.replace(',', '') 202 | new_text = new_text.replace('.', '') 203 | #pprint(new_text) 204 | output_json = json.load(open('threat.json')) 205 | threat_scores = 0.0 206 | number_of_threat = 0 207 | for word in new_text.split(): 208 | for majorkey, subdict in output_json.items(): #items() replaces the Python 2-only iteritems() 209 | if word == majorkey: 210 | #print(subdict) 211 | threat_scores+=float(subdict) 212 | number_of_threat+=1 213 | #pprint(threat_score) 214 | #pprint(number_of_threat) 215 | 216 | charLength = float(len(text)) 217 | wordLength = float(len(text.split())) 218 | averages = float(charLength/wordLength) 219 | #pprint('Character Length-> %d'%charLength) 220 | #pprint('Word Length-> %d'%wordLength) 221 | 222 | splitter = Splitter() 223 | postagger = POSTagger() 224 | bigramtagger = BigramTrigram() 225 | 226 | dicttagger = DictionaryTagger([ 'Dicts/threatWords.yml','Dicts/Positive.yml', 'Dicts/Negative.yml', 227 | 'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml']) 228 | 229 | 230 | splitted_sentences = splitter.split(text) 231 | 232 | pos_tagged_sentences = postagger.pos_tag(splitted_sentences) 233 | 234 | dict_tagged_sentences = dicttagger.tag(pos_tagged_sentences) 235 | 236 | #print("analyzing sentiment...") 237 | sentimentscore = sentiment_score(dict_tagged_sentences) 238 | 239 | dicttagger1 = DictionaryTagger([
'Dicts/threatWords.yml','Dicts/Positive.yml', 'Dicts/Negative.yml', 240 | 'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml']) 241 | 242 | splitted_sentences1 = splitter.split(text) 243 | 244 | pos_tagged_sentences1 = postagger.pos_tag(splitted_sentences1) 245 | 246 | dict_tagged_sentences1 = dicttagger1.tag(pos_tagged_sentences1) 247 | 248 | threatCounts = bigramtagger.countThreat(text) 249 | #return: threat, positive, negative 250 | 251 | #print("threat tuple ",threatCounts) 252 | #print("analyzing sentiment...") 253 | 254 | #print("analyzing threat...") 255 | threatscore = threat_score(dict_tagged_sentences1) 256 | 257 | finalScore = threatscore+threatCounts[2]+threatCounts[0]-threatCounts[1] 258 | #print(finalScore) 259 | 260 | average = number_of_threat/wordLength 261 | #print(threatscore) 262 | 263 | system = sentimentscore, averages, finalScore, number_of_threat, average 264 | f.write(str(system)+'\n') 265 | 266 | 267 | 268 | 269 | -------------------------------------------------------------------------------- /ip.py: -------------------------------------------------------------------------------- 1 | from pprint import pprint 2 | import json 3 | import csv 4 | import numpy as np 5 | from pprint import pprint 6 | import nltk 7 | import yaml 8 | import sys 9 | import os 10 | import re 11 | 12 | class Splitter(object): 13 | 14 | def __init__(self): 15 | self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle') 16 | self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer() 17 | 18 | def split(self, text): 19 | sentences = self.nltk_splitter.tokenize(text) 20 | tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences] 21 | return tokenized_sentences 22 | 23 | 24 | class POSTagger(object): 25 | 26 | def __init__(self): 27 | pass 28 | 29 | def pos_tag(self, sentences): 30 | pos = [nltk.pos_tag(sentence) for sentence in sentences] 31 | #adapt format 32 | pos = [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos] 33 | return pos 34 | 35 | class DictionaryTagger(object): 36 | 37 | def __init__(self, dictionary_paths): 38 | files = [open(path, 'r') for path in dictionary_paths] 39 | dictionaries = [yaml.safe_load(dict_file) for dict_file in files] #safe_load parses plain YAML mappings without executing arbitrary tags 40 | for dict_file in files: dict_file.close() #close the files explicitly (a bare map() is lazy on Python 3) 41 | self.dictionary = {} 42 | self.max_key_size = 0 43 | for curr_dict in dictionaries: 44 | for key in curr_dict: 45 | if key in self.dictionary: 46 | self.dictionary[key].extend(curr_dict[key]) 47 | else: 48 | self.dictionary[key] = curr_dict[key] 49 | self.max_key_size = max(self.max_key_size, len(key)) 50 | 51 | def tag(self, postagged_sentences): 52 | return [self.tag_sentence(sentence) for sentence in postagged_sentences] 53 | 54 | def tag_sentence(self, sentence, tag_with_lemmas=False): 55 | tag_sentence = [] 56 | N = len(sentence) 57 | if self.max_key_size == 0: 58 | self.max_key_size = N 59 | i = 0 60 | while (i < N): 61 | j = min(i + self.max_key_size, N) #avoid overflow 62 | tagged = False 63 | while (j > i): 64 | expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower() 65 | expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower() 66 | if tag_with_lemmas: 67 | literal = expression_lemma 68 | else: 69 | literal = expression_form 70 | if literal in self.dictionary: 71 | #self.logger.debug("found: %s" % literal) 72 | is_single_token = j - i == 1 73 | original_position = i 74 | i = j 75 | taggings = [tag for tag in self.dictionary[literal]] 76 | tagged_expression = (expression_form,
expression_lemma, taggings) 77 | if is_single_token: #if the tagged literal is a single token, conserve its previous taggings: 78 | original_token_tagging = sentence[original_position][2] 79 | tagged_expression[2].extend(original_token_tagging) 80 | tag_sentence.append(tagged_expression) 81 | tagged = True 82 | else: 83 | j = j - 1 84 | if not tagged: 85 | tag_sentence.append(sentence[i]) 86 | i += 1 87 | return tag_sentence 88 | 89 | def value_of(sentiment): #inverted convention: 'positive' lowers the score, 'negative' raises it 90 | if sentiment == 'positive': return -1 91 | if sentiment == 'negative': return 1 92 | return 0 93 | 94 | def value_of_threat(sentiment): #like value_of, but 'threat' tags also raise the score 95 | if sentiment == 'positive': return -1 96 | if sentiment == 'negative': return 1 97 | if sentiment == 'threat': return 1 98 | return 0 99 | 100 | def sentence_score(sentence_tokens, previous_token, acum_score): 101 | if not sentence_tokens: 102 | return acum_score 103 | else: 104 | current_token = sentence_tokens[0] 105 | tags = current_token[2] 106 | token_score = sum([value_of(tag) for tag in tags]) 107 | if previous_token is not None: 108 | previous_tags = previous_token[2] 109 | if 'inc' in previous_tags: 110 | token_score *= 2.0 111 | elif 'dec' in previous_tags: 112 | token_score /= 2.0 113 | elif 'inv' in previous_tags: 114 | token_score *= -1.0 115 | return sentence_score(sentence_tokens[1:], current_token, acum_score + token_score) 116 | 117 | def threatening_score(sentence_tokens, previous_token, acum_score): 118 | if not sentence_tokens: 119 | return acum_score 120 | else: 121 | current_token = sentence_tokens[0] 122 | tags = current_token[2] 123 | token_score = sum([value_of_threat(tag) for tag in tags]) 124 | if previous_token is not None: 125 | previous_tags = previous_token[2] 126 | if 'inc' in previous_tags: 127 | token_score *= 2.0 128 | elif 'dec' in previous_tags: 129 | token_score /= 2.0 130 | elif 'inv' in previous_tags: 131 | token_score *= -1.0 132 | return threatening_score(sentence_tokens[1:], current_token, acum_score + token_score) 133 | 134 | def threat_score(review): 135 | return sum([threatening_score(sentence, None, 0.0) for sentence in review]) 136 | def sentiment_score(review): 137 | return sum([sentence_score(sentence, None, 0.0) for sentence in review]) 138 | 139 | 140 | class BigramTrigram(object): 141 | 142 | """ 143 | This method searches for each bigram and trigram in the dictionaries and counts threat, positive and negative matches 144 | :return: ThreatCount, PositiveCount, NegativeCount 145 | """ 146 | def countThreat(self, sentence): 147 | dictTag = ['Dicts/threatWords.yml', 'Dicts/Positive.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml'] 148 | files = [open(path, 'r') for path in dictTag] 149 | dictionaries = [yaml.safe_load(dict_file) for dict_file in files] 150 | for dict_file in files: dict_file.close() 151 | tokens = nltk.word_tokenize(sentence) 152 | #print(tokens) 153 | bigrams = [" ".join(pair) for pair in nltk.bigrams(tokens)] 154 | #print(bigrams) 155 | trigrams = [" ".join(trio) for trio in nltk.trigrams(tokens)] 156 | #print(trigrams) 157 | 158 | bigramThreatCount = 0 159 | bigramPositiveCount = 0 160 | bigramNegativeCount = 0 161 | 162 | trioThreatCount = 0 163 | trioPositiveCount = 0 164 | trioNegativeCount = 0 165 | 166 | for bigram in bigrams: 167 | for dictionary in dictionaries: 168 | if bigram in dictionary and dictionary[bigram] == ['threat']: 169 | bigramThreatCount += 1 170 | if bigram in dictionary and dictionary[bigram] == ['positive']: 171 | bigramPositiveCount += 1 172 | if bigram in dictionary and dictionary[bigram] == ['negative']: 173 | bigramNegativeCount += 1 174 | 175 | for trigram in
trigrams: 176 | for dictionary in dictionaries: 177 | if trigram in dictionary and dictionary[trigram] == ['threat']: 178 | trioThreatCount += 1 179 | if trigram in dictionary and dictionary[trigram] == ['positive']: 180 | trioPositiveCount += 1 181 | if trigram in dictionary and dictionary[trigram] == ['negative']: 182 | trioNegativeCount += 1 183 | 184 | threatCount = bigramThreatCount + trioThreatCount 185 | positiveCount = bigramPositiveCount + trioPositiveCount 186 | negativeCount = bigramNegativeCount + trioNegativeCount 187 | return threatCount, positiveCount, negativeCount 188 | 189 | 190 | 191 | 192 | if __name__ == "__main__": 193 | 194 | with open('inputparams.txt', 'w') as f: #create new file for writing input parameters 195 | with open("Input.txt") as file: #read input file 196 | reader = csv.reader(file) 197 | for row in reader: 198 | #print(row) 199 | text = ''.join(row) 200 | new_text = text.replace(',', '') 201 | new_text = new_text.replace('.', '') 202 | #pprint(new_text) 203 | output_json = json.load(open('threat.json')) #read the threat.json file 204 | threat_scores = 0.0 205 | number_of_threat = 0 206 | for word in new_text.split(): #check if word is in threat.json file 207 | for majorkey, subdict in output_json.items(): #items() replaces the Python 2-only iteritems() 208 | if word == majorkey: 209 | #print(subdict) 210 | threat_scores+=float(subdict) 211 | number_of_threat+=1 212 | #pprint(threat_score) 213 | #pprint(number_of_threat) 214 | 215 | if threat_scores >= 0.8: #assign expected output value of the input sentences 216 | output = 1 217 | else: 218 | output = 0 219 | charLength = float(len(text)) 220 | wordLength = float(len(text.split())) 221 | averages = float(charLength/wordLength) 222 | #pprint('Character Length-> %d'%charLength) 223 | #pprint('Word Length-> %d'%wordLength) 224 | 225 | """ Tag the words as threat and calculate threat score""" 226 | splitter = Splitter() 227 | postagger = POSTagger() 228 | bigramtagger = BigramTrigram() 229 | 230 | dicttagger = DictionaryTagger([ 'Dicts/threatWords.yml','Dicts/Positive.yml', 'Dicts/Negative.yml', 231 | 'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml']) 232 | 233 | 234 | splitted_sentences = splitter.split(text) 235 | 236 | pos_tagged_sentences = postagger.pos_tag(splitted_sentences) 237 | 238 | dict_tagged_sentences = dicttagger.tag(pos_tagged_sentences) 239 | 240 | #print("analyzing sentiment...") 241 | sentimentscore = sentiment_score(dict_tagged_sentences) 242 | 243 | dicttagger1 = DictionaryTagger([ 'Dicts/threatWords.yml','Dicts/Positive.yml', 'Dicts/Negative.yml', 244 | 'Dicts/Increasing.yml', 'Dicts/Decreasing.yml', 'Dicts/Inverting.yml']) 245 | 246 | splitted_sentences1 = splitter.split(text) 247 | 248 | pos_tagged_sentences1 = postagger.pos_tag(splitted_sentences1) 249 | 250 | dict_tagged_sentences1 = dicttagger1.tag(pos_tagged_sentences1) 251 | 252 | threatCounts = bigramtagger.countThreat(text) 253 | #return: threat, positive, negative 254 | 255 | #print("threat tuple ",threatCounts) 256 | #print("analyzing sentiment...") 257 | 258 | 259 | 260 | #print("analyzing threat...") 261 | threatscore = threat_score(dict_tagged_sentences1) 262 | 263 | finalScore = threatscore+threatCounts[2]+threatCounts[0]-threatCounts[1] 264 | #print(finalScore) 265 | 266 | average = number_of_threat/wordLength 267 | #print(threatscore) 268 | """form the input parameters for the NN""" 269 | system = sentimentscore, averages, finalScore, number_of_threat, average, output 270 | #write the value system in the file 271 | f.write(str(system)+'\n')
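# What one row of inputparams.txt means (a summary inferred from the code above;
# the names are the script's own variables, not a documented format):
#   sentimentscore   - modifier-aware positive/negative score (higher = more negative text)
#   averages         - characters per word of the raw input line
#   finalScore       - threatscore plus the negative and threat bigram/trigram counts,
#                      minus the positive bigram/trigram count
#   number_of_threat - how many words of the line appear as keys in threat.json
#   average          - number_of_threat / wordLength, i.e. the threat-word density
#   output           - expected training label: 1 if the summed threat.json scores >= 0.8, else 0
# op.py builds the same tuple without the trailing label, so its testinputdata.txt
# rows carry five values while these training rows carry six.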
272 | 273 | 274 | 275 | 276 | -------------------------------------------------------------------------------- /Dicts/Positive.yml: -------------------------------------------------------------------------------- 1 | abound: [positive] 2 | abounds: [positive] 3 | abundance: [positive] 4 | abundant: [positive] 5 | accessable: [positive] 6 | accessible: [positive] 7 | acclaim: [positive] 8 | acclaimed: [positive] 9 | acclamation: [positive] 10 | accolade: [positive] 11 | accolades: [positive] 12 | accommodative: [positive] 13 | accomodative: [positive] 14 | accomplish: [positive] 15 | accomplished: [positive] 16 | accomplishment: [positive] 17 | accomplishments: [positive] 18 | accurate: [positive] 19 | accurately: [positive] 20 | achievable: [positive] 21 | achievement: [positive] 22 | achievements: [positive] 23 | achievible: [positive] 24 | acumen: [positive] 25 | adaptable: [positive] 26 | adaptive: [positive] 27 | adequate: [positive] 28 | adjustable: [positive] 29 | admirable: [positive] 30 | admirably: [positive] 31 | admiration: [positive] 32 | admire: [positive] 33 | admirer: [positive] 34 | admiring: [positive] 35 | admiringly: [positive] 36 | adorable: [positive] 37 | adore: [positive] 38 | adored: [positive] 39 | adorer: [positive] 40 | adoring: [positive] 41 | adoringly: [positive] 42 | adroit: [positive] 43 | adroitly: [positive] 44 | adulate: [positive] 45 | adulation: [positive] 46 | adulatory: [positive] 47 | advanced: [positive] 48 | advantage: [positive] 49 | advantageous: [positive] 50 | advantageously: [positive] 51 | advantages: [positive] 52 | adventuresome: [positive] 53 | adventurous: [positive] 54 | advocate: [positive] 55 | advocated: [positive] 56 | advocates: [positive] 57 | affability: [positive] 58 | affable: [positive] 59 | affably: [positive] 60 | affectation: [positive] 61 | affection: [positive] 62 | affectionate: [positive] 63 | affinity: [positive] 64 | affirm: [positive] 65 | affirmation: [positive] 66 | affirmative: [positive] 67 | affluence: [positive] 68 | affluent: [positive] 69 | afford: [positive] 70 | affordable: [positive] 71 | affordably: [positive] 72 | afordable: [positive] 73 | agile: [positive] 74 | agilely: [positive] 75 | agility: [positive] 76 | agreeable: [positive] 77 | agreeableness: [positive] 78 | agreeably: [positive] 79 | all-around: [positive] 80 | alluring: [positive] 81 | alluringly: [positive] 82 | altruistic: [positive] 83 | altruistically: [positive] 84 | amaze: [positive] 85 | amazed: [positive] 86 | amazement: [positive] 87 | amazes: [positive] 88 | amazing: [positive] 89 | amazingly: [positive] 90 | ambitious: [positive] 91 | ambitiously: [positive] 92 | ameliorate: [positive] 93 | amenable: [positive] 94 | amenity: [positive] 95 | amiability: [positive] 96 | amiabily: [positive] 97 | amiable: [positive] 98 | amicability: [positive] 99 | amicable: [positive] 100 | amicably: [positive] 101 | amity: [positive] 102 | ample: [positive] 103 | amply: [positive] 104 | amuse: [positive] 105 | amusing: [positive] 106 | amusingly: [positive] 107 | angel: [positive] 108 | angelic: [positive] 109 | apotheosis: [positive] 110 | appeal: [positive] 111 | appealing: [positive] 112 | applaud: [positive] 113 | appreciable: [positive] 114 | appreciate: [positive] 115 | appreciated: [positive] 116 | appreciates: [positive] 117 | appreciative: [positive] 118 | appreciatively: [positive] 119 | appropriate: [positive] 120 | approval: [positive] 121 | approve: [positive] 122 | ardent: [positive] 123 | ardently: [positive] 124 | ardor: [positive] 
125 | articulate: [positive] 126 | aspiration: [positive] 127 | aspirations: [positive] 128 | aspire: [positive] 129 | assurance: [positive] 130 | assurances: [positive] 131 | assure: [positive] 132 | assuredly: [positive] 133 | assuring: [positive] 134 | astonish: [positive] 135 | astonished: [positive] 136 | astonishing: [positive] 137 | astonishingly: [positive] 138 | astonishment: [positive] 139 | astound: [positive] 140 | astounded: [positive] 141 | astounding: [positive] 142 | astoundingly: [positive] 143 | astutely: [positive] 144 | attentive: [positive] 145 | attraction: [positive] 146 | attractive: [positive] 147 | attractively: [positive] 148 | attune: [positive] 149 | audible: [positive] 150 | audibly: [positive] 151 | auspicious: [positive] 152 | authentic: [positive] 153 | authoritative: [positive] 154 | autonomous: [positive] 155 | available: [positive] 156 | aver: [positive] 157 | avid: [positive] 158 | avidly: [positive] 159 | award: [positive] 160 | awarded: [positive] 161 | awards: [positive] 162 | awe: [positive] 163 | awed: [positive] 164 | awesome: [positive] 165 | awesomely: [positive] 166 | awesomeness: [positive] 167 | awestruck: [positive] 168 | awsome: [positive] 169 | backbone: [positive] 170 | balanced: [positive] 171 | bargain: [positive] 172 | beauteous: [positive] 173 | beautiful: [positive] 174 | beautifullly: [positive] 175 | beautifully: [positive] 176 | beautify: [positive] 177 | beauty: [positive] 178 | beckon: [positive] 179 | beckoned: [positive] 180 | beckoning: [positive] 181 | beckons: [positive] 182 | believable: [positive] 183 | believeable: [positive] 184 | beloved: [positive] 185 | benefactor: [positive] 186 | beneficent: [positive] 187 | beneficial: [positive] 188 | beneficially: [positive] 189 | beneficiary: [positive] 190 | benefit: [positive] 191 | benefits: [positive] 192 | benevolence: [positive] 193 | benevolent: [positive] 194 | benifits: [positive] 195 | best: [positive] 196 | best-known: [positive] 197 | best-performing: [positive] 198 | best-selling: [positive] 199 | better: [positive] 200 | better-known: [positive] 201 | better-than-expected: [positive] 202 | beutifully: [positive] 203 | blameless: [positive] 204 | bless: [positive] 205 | blessing: [positive] 206 | bliss: [positive] 207 | blissful: [positive] 208 | blissfully: [positive] 209 | blithe: [positive] 210 | blockbuster: [positive] 211 | bloom: [positive] 212 | blossom: [positive] 213 | bolster: [positive] 214 | bonny: [positive] 215 | bonus: [positive] 216 | bonuses: [positive] 217 | boom: [positive] 218 | booming: [positive] 219 | boost: [positive] 220 | boundless: [positive] 221 | bountiful: [positive] 222 | brainiest: [positive] 223 | brainy: [positive] 224 | brand-new: [positive] 225 | brave: [positive] 226 | bravery: [positive] 227 | bravo: [positive] 228 | breakthrough: [positive] 229 | breakthroughs: [positive] 230 | breathlessness: [positive] 231 | breathtaking: [positive] 232 | breathtakingly: [positive] 233 | breeze: [positive] 234 | bright: [positive] 235 | brighten: [positive] 236 | brighter: [positive] 237 | brightest: [positive] 238 | brilliance: [positive] 239 | brilliances: [positive] 240 | brilliant: [positive] 241 | brilliantly: [positive] 242 | brisk: [positive] 243 | brotherly: [positive] 244 | bullish: [positive] 245 | buoyant: [positive] 246 | cajole: [positive] 247 | calm: [positive] 248 | calming: [positive] 249 | calmness: [positive] 250 | capability: [positive] 251 | capable: [positive] 252 | capably: [positive] 253 | captivate: [positive] 254 | 
captivating: [positive] 255 | carefree: [positive] 256 | cashback: [positive] 257 | cashbacks: [positive] 258 | catchy: [positive] 259 | celebrate: [positive] 260 | celebrated: [positive] 261 | celebration: [positive] 262 | celebratory: [positive] 263 | champ: [positive] 264 | champion: [positive] 265 | charisma: [positive] 266 | charismatic: [positive] 267 | charitable: [positive] 268 | charm: [positive] 269 | charming: [positive] 270 | charmingly: [positive] 271 | chaste: [positive] 272 | cheaper: [positive] 273 | cheapest: [positive] 274 | cheer: [positive] 275 | cheerful: [positive] 276 | cheery: [positive] 277 | cherish: [positive] 278 | cherished: [positive] 279 | cherub: [positive] 280 | chic: [positive] 281 | chivalrous: [positive] 282 | chivalry: [positive] 283 | civility: [positive] 284 | civilize: [positive] 285 | clarity: [positive] 286 | classic: [positive] 287 | classy: [positive] 288 | clean: [positive] 289 | cleaner: [positive] 290 | cleanest: [positive] 291 | cleanliness: [positive] 292 | cleanly: [positive] 293 | clear: [positive] 294 | clear-cut: [positive] 295 | cleared: [positive] 296 | clearer: [positive] 297 | clearly: [positive] 298 | clears: [positive] 299 | clever: [positive] 300 | cleverly: [positive] 301 | cohere: [positive] 302 | coherence: [positive] 303 | coherent: [positive] 304 | cohesive: [positive] 305 | colorful: [positive] 306 | comely: [positive] 307 | comfort: [positive] 308 | comfortable: [positive] 309 | comfortably: [positive] 310 | comforting: [positive] 311 | comfy: [positive] 312 | commend: [positive] 313 | commendable: [positive] 314 | commendably: [positive] 315 | commitment: [positive] 316 | commodious: [positive] 317 | compact: [positive] 318 | compactly: [positive] 319 | compassion: [positive] 320 | compassionate: [positive] 321 | compatible: [positive] 322 | competitive: [positive] 323 | complement: [positive] 324 | complementary: [positive] 325 | complemented: [positive] 326 | complements: [positive] 327 | compliant: [positive] 328 | compliment: [positive] 329 | complimentary: [positive] 330 | comprehensive: [positive] 331 | conciliate: [positive] 332 | conciliatory: [positive] 333 | concise: [positive] 334 | confidence: [positive] 335 | confident: [positive] 336 | congenial: [positive] 337 | congratulate: [positive] 338 | congratulation: [positive] 339 | congratulations: [positive] 340 | congratulatory: [positive] 341 | conscientious: [positive] 342 | considerate: [positive] 343 | consistent: [positive] 344 | consistently: [positive] 345 | constructive: [positive] 346 | consummate: [positive] 347 | contentment: [positive] 348 | continuity: [positive] 349 | contrasty: [positive] 350 | contribution: [positive] 351 | convenience: [positive] 352 | convenient: [positive] 353 | conveniently: [positive] 354 | convience: [positive] 355 | convienient: [positive] 356 | convient: [positive] 357 | convincing: [positive] 358 | convincingly: [positive] 359 | cool: [positive] 360 | coolest: [positive] 361 | cooperative: [positive] 362 | cooperatively: [positive] 363 | cornerstone: [positive] 364 | correct: [positive] 365 | correctly: [positive] 366 | cost-effective: [positive] 367 | cost-saving: [positive] 368 | counter-attack: [positive] 369 | counter-attacks: [positive] 370 | courage: [positive] 371 | courageous: [positive] 372 | courageously: [positive] 373 | courageousness: [positive] 374 | courteous: [positive] 375 | courtly: [positive] 376 | covenant: [positive] 377 | cozy: [positive] 378 | creative: [positive] 379 | credence: [positive] 380 | 
credible: [positive] 381 | crisp: [positive] 382 | crisper: [positive] 383 | cure: [positive] 384 | cure-all: [positive] 385 | cushy: [positive] 386 | cute: [positive] 387 | cuteness: [positive] 388 | danke: [positive] 389 | danken: [positive] 390 | daring: [positive] 391 | daringly: [positive] 392 | darling: [positive] 393 | dashing: [positive] 394 | dauntless: [positive] 395 | dawn: [positive] 396 | dazzle: [positive] 397 | dazzled: [positive] 398 | dazzling: [positive] 399 | dead-cheap: [positive] 400 | dead-on: [positive] 401 | decency: [positive] 402 | decent: [positive] 403 | decisive: [positive] 404 | decisiveness: [positive] 405 | dedicated: [positive] 406 | defeat: [positive] 407 | defeated: [positive] 408 | defeating: [positive] 409 | defeats: [positive] 410 | defender: [positive] 411 | deference: [positive] 412 | deft: [positive] 413 | deginified: [positive] 414 | delectable: [positive] 415 | delicacy: [positive] 416 | delicate: [positive] 417 | delicious: [positive] 418 | delight: [positive] 419 | delighted: [positive] 420 | delightful: [positive] 421 | delightfully: [positive] 422 | delightfulness: [positive] 423 | dependable: [positive] 424 | dependably: [positive] 425 | deservedly: [positive] 426 | deserving: [positive] 427 | desirable: [positive] 428 | desiring: [positive] 429 | desirous: [positive] 430 | destiny: [positive] 431 | detachable: [positive] 432 | devout: [positive] 433 | dexterous: [positive] 434 | dexterously: [positive] 435 | dextrous: [positive] 436 | dignified: [positive] 437 | dignify: [positive] 438 | dignity: [positive] 439 | diligence: [positive] 440 | diligent: [positive] 441 | diligently: [positive] 442 | diplomatic: [positive] 443 | dirt-cheap: [positive] 444 | distinction: [positive] 445 | distinctive: [positive] 446 | distinguished: [positive] 447 | diversified: [positive] 448 | divine: [positive] 449 | divinely: [positive] 450 | dominate: [positive] 451 | dominated: [positive] 452 | dominates: [positive] 453 | dote: [positive] 454 | dotingly: [positive] 455 | doubtless: [positive] 456 | dreamland: [positive] 457 | dumbfounded: [positive] 458 | dumbfounding: [positive] 459 | dummy-proof: [positive] 460 | durable: [positive] 461 | dynamic: [positive] 462 | eager: [positive] 463 | eagerly: [positive] 464 | eagerness: [positive] 465 | earnest: [positive] 466 | earnestly: [positive] 467 | earnestness: [positive] 468 | ease: [positive] 469 | eased: [positive] 470 | eases: [positive] 471 | easier: [positive] 472 | easiest: [positive] 473 | easiness: [positive] 474 | easing: [positive] 475 | easy: [positive] 476 | easy-to-use: [positive] 477 | easygoing: [positive] 478 | ebullience: [positive] 479 | ebullient: [positive] 480 | ebulliently: [positive] 481 | ecenomical: [positive] 482 | economical: [positive] 483 | ecstasies: [positive] 484 | ecstasy: [positive] 485 | ecstatic: [positive] 486 | ecstatically: [positive] 487 | edify: [positive] 488 | educated: [positive] 489 | effective: [positive] 490 | effectively: [positive] 491 | effectiveness: [positive] 492 | effectual: [positive] 493 | efficacious: [positive] 494 | efficient: [positive] 495 | efficiently: [positive] 496 | effortless: [positive] 497 | effortlessly: [positive] 498 | effusion: [positive] 499 | effusive: [positive] 500 | effusively: [positive] 501 | effusiveness: [positive] 502 | elan: [positive] 503 | elate: [positive] 504 | elated: [positive] 505 | elatedly: [positive] 506 | elation: [positive] 507 | electrify: [positive] 508 | elegance: [positive] 509 | elegant: [positive] 510 | 
elegantly: [positive] 511 | elevate: [positive] 512 | elite: [positive] 513 | eloquence: [positive] 514 | eloquent: [positive] 515 | eloquently: [positive] 516 | embolden: [positive] 517 | eminence: [positive] 518 | eminent: [positive] 519 | empathize: [positive] 520 | empathy: [positive] 521 | empower: [positive] 522 | empowerment: [positive] 523 | enchant: [positive] 524 | enchanted: [positive] 525 | enchanting: [positive] 526 | enchantingly: [positive] 527 | encourage: [positive] 528 | encouragement: [positive] 529 | encouraging: [positive] 530 | encouragingly: [positive] 531 | endear: [positive] 532 | endearing: [positive] 533 | endorse: [positive] 534 | endorsed: [positive] 535 | endorsement: [positive] 536 | endorses: [positive] 537 | endorsing: [positive] 538 | energetic: [positive] 539 | energize: [positive] 540 | energy-efficient: [positive] 541 | energy-saving: [positive] 542 | engaging: [positive] 543 | engrossing: [positive] 544 | enhance: [positive] 545 | enhanced: [positive] 546 | enhancement: [positive] 547 | enhances: [positive] 548 | enjoy: [positive] 549 | enjoyable: [positive] 550 | enjoyably: [positive] 551 | enjoyed: [positive] 552 | enjoying: [positive] 553 | enjoyment: [positive] 554 | enjoys: [positive] 555 | enlighten: [positive] 556 | enlightenment: [positive] 557 | enliven: [positive] 558 | ennoble: [positive] 559 | enough: [positive] 560 | enrapt: [positive] 561 | enrapture: [positive] 562 | enraptured: [positive] 563 | enrich: [positive] 564 | enrichment: [positive] 565 | enterprising: [positive] 566 | entertain: [positive] 567 | entertaining: [positive] 568 | entertains: [positive] 569 | enthral: [positive] 570 | enthrall: [positive] 571 | enthralled: [positive] 572 | enthuse: [positive] 573 | enthusiasm: [positive] 574 | enthusiast: [positive] 575 | enthusiastic: [positive] 576 | enthusiastically: [positive] 577 | entice: [positive] 578 | enticed: [positive] 579 | enticing: [positive] 580 | enticingly: [positive] 581 | entranced: [positive] 582 | entrancing: [positive] 583 | entrust: [positive] 584 | enviable: [positive] 585 | enviably: [positive] 586 | envious: [positive] 587 | enviously: [positive] 588 | enviousness: [positive] 589 | envy: [positive] 590 | equitable: [positive] 591 | ergonomical: [positive] 592 | err-free: [positive] 593 | erudite: [positive] 594 | ethical: [positive] 595 | eulogize: [positive] 596 | euphoria: [positive] 597 | euphoric: [positive] 598 | euphorically: [positive] 599 | evaluative: [positive] 600 | evenly: [positive] 601 | eventful: [positive] 602 | everlasting: [positive] 603 | evocative: [positive] 604 | exalt: [positive] 605 | exaltation: [positive] 606 | exalted: [positive] 607 | exaltedly: [positive] 608 | exalting: [positive] 609 | exaltingly: [positive] 610 | examplar: [positive] 611 | examplary: [positive] 612 | excallent: [positive] 613 | exceed: [positive] 614 | exceeded: [positive] 615 | exceeding: [positive] 616 | exceedingly: [positive] 617 | exceeds: [positive] 618 | excel: [positive] 619 | exceled: [positive] 620 | excelent: [positive] 621 | excellant: [positive] 622 | excelled: [positive] 623 | excellence: [positive] 624 | excellency: [positive] 625 | excellent: [positive] 626 | excellently: [positive] 627 | excels: [positive] 628 | exceptional: [positive] 629 | exceptionally: [positive] 630 | excite: [positive] 631 | excited: [positive] 632 | excitedly: [positive] 633 | excitedness: [positive] 634 | excitement: [positive] 635 | excites: [positive] 636 | exciting: [positive] 637 | excitingly: [positive] 638 | 
exellent: [positive] 639 | exemplar: [positive] 640 | exemplary: [positive] 641 | exhilarate: [positive] 642 | exhilarating: [positive] 643 | exhilaratingly: [positive] 644 | exhilaration: [positive] 645 | exonerate: [positive] 646 | expansive: [positive] 647 | expeditiously: [positive] 648 | expertly: [positive] 649 | exquisite: [positive] 650 | exquisitely: [positive] 651 | extol: [positive] 652 | extoll: [positive] 653 | extraordinarily: [positive] 654 | extraordinary: [positive] 655 | exuberance: [positive] 656 | exuberant: [positive] 657 | exuberantly: [positive] 658 | exult: [positive] 659 | exultant: [positive] 660 | exultation: [positive] 661 | exultingly: [positive] 662 | eye-catch: [positive] 663 | eye-catching: [positive] 664 | eyecatch: [positive] 665 | eyecatching: [positive] 666 | fabulous: [positive] 667 | fabulously: [positive] 668 | facilitate: [positive] 669 | fair: [positive] 670 | fairly: [positive] 671 | fairness: [positive] 672 | faith: [positive] 673 | faithful: [positive] 674 | faithfully: [positive] 675 | faithfulness: [positive] 676 | fame: [positive] 677 | famed: [positive] 678 | famous: [positive] 679 | famously: [positive] 680 | fancier: [positive] 681 | fancinating: [positive] 682 | fancy: [positive] 683 | fanfare: [positive] 684 | fans: [positive] 685 | fantastic: [positive] 686 | fantastically: [positive] 687 | fascinate: [positive] 688 | fascinating: [positive] 689 | fascinatingly: [positive] 690 | fascination: [positive] 691 | fashionable: [positive] 692 | fashionably: [positive] 693 | fast: [positive] 694 | fast-growing: [positive] 695 | fast-paced: [positive] 696 | faster: [positive] 697 | fastest: [positive] 698 | fastest-growing: [positive] 699 | faultless: [positive] 700 | fav: [positive] 701 | fave: [positive] 702 | favor: [positive] 703 | favorable: [positive] 704 | favored: [positive] 705 | favorite: [positive] 706 | favorited: [positive] 707 | favour: [positive] 708 | fearless: [positive] 709 | fearlessly: [positive] 710 | feasible: [positive] 711 | feasibly: [positive] 712 | feat: [positive] 713 | feature-rich: [positive] 714 | fecilitous: [positive] 715 | feisty: [positive] 716 | felicitate: [positive] 717 | felicitous: [positive] 718 | felicity: [positive] 719 | fertile: [positive] 720 | fervent: [positive] 721 | fervently: [positive] 722 | fervid: [positive] 723 | fervidly: [positive] 724 | fervor: [positive] 725 | festive: [positive] 726 | fidelity: [positive] 727 | fiery: [positive] 728 | fine: [positive] 729 | fine-looking: [positive] 730 | finely: [positive] 731 | finer: [positive] 732 | finest: [positive] 733 | firmer: [positive] 734 | first-class: [positive] 735 | first-in-class: [positive] 736 | first-rate: [positive] 737 | flashy: [positive] 738 | flatter: [positive] 739 | flattering: [positive] 740 | flatteringly: [positive] 741 | flawless: [positive] 742 | flawlessly: [positive] 743 | flexibility: [positive] 744 | flexible: [positive] 745 | flourish: [positive] 746 | flourishing: [positive] 747 | fluent: [positive] 748 | flutter: [positive] 749 | fond: [positive] 750 | fondly: [positive] 751 | fondness: [positive] 752 | foolproof: [positive] 753 | foremost: [positive] 754 | foresight: [positive] 755 | formidable: [positive] 756 | fortitude: [positive] 757 | fortuitous: [positive] 758 | fortuitously: [positive] 759 | fortunate: [positive] 760 | fortunately: [positive] 761 | fortune: [positive] 762 | fragrant: [positive] 763 | free: [positive] 764 | freed: [positive] 765 | freedom: [positive] 766 | freedoms: [positive] 767 | fresh: 
[positive] 768 | fresher: [positive] 769 | freshest: [positive] 770 | friendliness: [positive] 771 | friendly: [positive] 772 | frolic: [positive] 773 | frugal: [positive] 774 | fruitful: [positive] 775 | ftw: [positive] 776 | fulfillment: [positive] 777 | fun: [positive] 778 | futurestic: [positive] 779 | futuristic: [positive] 780 | gaiety: [positive] 781 | gaily: [positive] 782 | gain: [positive] 783 | gained: [positive] 784 | gainful: [positive] 785 | gainfully: [positive] 786 | gaining: [positive] 787 | gains: [positive] 788 | gallant: [positive] 789 | gallantly: [positive] 790 | galore: [positive] 791 | geekier: [positive] 792 | geeky: [positive] 793 | gem: [positive] 794 | gems: [positive] 795 | generosity: [positive] 796 | generous: [positive] 797 | generously: [positive] 798 | genial: [positive] 799 | genius: [positive] 800 | gentle: [positive] 801 | gentlest: [positive] 802 | genuine: [positive] 803 | gifted: [positive] 804 | glad: [positive] 805 | gladden: [positive] 806 | gladly: [positive] 807 | gladness: [positive] 808 | glamorous: [positive] 809 | glee: [positive] 810 | gleeful: [positive] 811 | gleefully: [positive] 812 | glimmer: [positive] 813 | glimmering: [positive] 814 | glisten: [positive] 815 | glistening: [positive] 816 | glitter: [positive] 817 | glitz: [positive] 818 | glorify: [positive] 819 | glorious: [positive] 820 | gloriously: [positive] 821 | glory: [positive] 822 | glow: [positive] 823 | glowing: [positive] 824 | glowingly: [positive] 825 | god-given: [positive] 826 | god-send: [positive] 827 | godlike: [positive] 828 | godsend: [positive] 829 | gold: [positive] 830 | golden: [positive] 831 | good: [positive] 832 | goodly: [positive] 833 | goodness: [positive] 834 | goodwill: [positive] 835 | goood: [positive] 836 | gooood: [positive] 837 | gorgeous: [positive] 838 | gorgeously: [positive] 839 | grace: [positive] 840 | graceful: [positive] 841 | gracefully: [positive] 842 | gracious: [positive] 843 | graciously: [positive] 844 | graciousness: [positive] 845 | grand: [positive] 846 | grandeur: [positive] 847 | grateful: [positive] 848 | gratefully: [positive] 849 | gratification: [positive] 850 | gratified: [positive] 851 | gratifies: [positive] 852 | gratify: [positive] 853 | gratifying: [positive] 854 | gratifyingly: [positive] 855 | gratitude: [positive] 856 | great: [positive] 857 | greatest: [positive] 858 | greatness: [positive] 859 | grin: [positive] 860 | groundbreaking: [positive] 861 | guarantee: [positive] 862 | guidance: [positive] 863 | guiltless: [positive] 864 | gumption: [positive] 865 | gush: [positive] 866 | gusto: [positive] 867 | gutsy: [positive] 868 | hail: [positive] 869 | halcyon: [positive] 870 | hale: [positive] 871 | hallmark: [positive] 872 | hallmarks: [positive] 873 | hallowed: [positive] 874 | handier: [positive] 875 | handily: [positive] 876 | hands-down: [positive] 877 | handsome: [positive] 878 | handsomely: [positive] 879 | handy: [positive] 880 | happier: [positive] 881 | happily: [positive] 882 | happiness: [positive] 883 | happy: [positive] 884 | hard-working: [positive] 885 | hardier: [positive] 886 | hardy: [positive] 887 | harmless: [positive] 888 | harmonious: [positive] 889 | harmoniously: [positive] 890 | harmonize: [positive] 891 | harmony: [positive] 892 | headway: [positive] 893 | heal: [positive] 894 | healthful: [positive] 895 | healthy: [positive] 896 | hearten: [positive] 897 | heartening: [positive] 898 | heartfelt: [positive] 899 | heartily: [positive] 900 | heartwarming: [positive] 901 | heaven: 
[positive] 902 | heavenly: [positive] 903 | helped: [positive] 904 | helpful: [positive] 905 | helping: [positive] 906 | hero: [positive] 907 | heroic: [positive] 908 | heroically: [positive] 909 | heroine: [positive] 910 | heroize: [positive] 911 | heros: [positive] 912 | high-quality: [positive] 913 | high-spirited: [positive] 914 | hilarious: [positive] 915 | holy: [positive] 916 | homage: [positive] 917 | honest: [positive] 918 | honesty: [positive] 919 | honor: [positive] 920 | honorable: [positive] 921 | honored: [positive] 922 | honoring: [positive] 923 | hooray: [positive] 924 | hopeful: [positive] 925 | hospitable: [positive] 926 | hot: [positive] 927 | hotcake: [positive] 928 | hotcakes: [positive] 929 | hottest: [positive] 930 | hug: [positive] 931 | humane: [positive] 932 | humble: [positive] 933 | humility: [positive] 934 | humor: [positive] 935 | humorous: [positive] 936 | humorously: [positive] 937 | humour: [positive] 938 | humourous: [positive] 939 | ideal: [positive] 940 | idealize: [positive] 941 | ideally: [positive] 942 | idol: [positive] 943 | idolize: [positive] 944 | idolized: [positive] 945 | idyllic: [positive] 946 | illuminate: [positive] 947 | illuminati: [positive] 948 | illuminating: [positive] 949 | illumine: [positive] 950 | illustrious: [positive] 951 | ilu: [positive] 952 | imaculate: [positive] 953 | imaginative: [positive] 954 | immaculate: [positive] 955 | immaculately: [positive] 956 | immense: [positive] 957 | impartial: [positive] 958 | impartiality: [positive] 959 | impartially: [positive] 960 | impassioned: [positive] 961 | impeccable: [positive] 962 | impeccably: [positive] 963 | important: [positive] 964 | impress: [positive] 965 | impressed: [positive] 966 | impresses: [positive] 967 | impressive: [positive] 968 | impressively: [positive] 969 | impressiveness: [positive] 970 | improve: [positive] 971 | improved: [positive] 972 | improvement: [positive] 973 | improvements: [positive] 974 | improves: [positive] 975 | improving: [positive] 976 | incredible: [positive] 977 | incredibly: [positive] 978 | indebted: [positive] 979 | individualized: [positive] 980 | indulgence: [positive] 981 | indulgent: [positive] 982 | industrious: [positive] 983 | inestimable: [positive] 984 | inestimably: [positive] 985 | inexpensive: [positive] 986 | infallibility: [positive] 987 | infallible: [positive] 988 | infallibly: [positive] 989 | influential: [positive] 990 | ingenious: [positive] 991 | ingeniously: [positive] 992 | ingenuity: [positive] 993 | ingenuous: [positive] 994 | ingenuously: [positive] 995 | innocuous: [positive] 996 | innovation: [positive] 997 | innovative: [positive] 998 | inpressed: [positive] 999 | insightful: [positive] 1000 | insightfully: [positive] 1001 | inspiration: [positive] 1002 | inspirational: [positive] 1003 | inspire: [positive] 1004 | inspiring: [positive] 1005 | instantly: [positive] 1006 | instructive: [positive] 1007 | instrumental: [positive] 1008 | integral: [positive] 1009 | integrated: [positive] 1010 | intelligence: [positive] 1011 | intelligent: [positive] 1012 | intelligible: [positive] 1013 | interesting: [positive] 1014 | interests: [positive] 1015 | intimacy: [positive] 1016 | intimate: [positive] 1017 | intricate: [positive] 1018 | intrigue: [positive] 1019 | intriguing: [positive] 1020 | intriguingly: [positive] 1021 | intuitive: [positive] 1022 | invaluable: [positive] 1023 | invaluablely: [positive] 1024 | inventive: [positive] 1025 | invigorate: [positive] 1026 | invigorating: [positive] 1027 | invincibility: 
[positive] 1028 | invincible: [positive] 1029 | inviolable: [positive] 1030 | inviolate: [positive] 1031 | invulnerable: [positive] 1032 | irreplaceable: [positive] 1033 | irreproachable: [positive] 1034 | irresistible: [positive] 1035 | irresistibly: [positive] 1036 | issue-free: [positive] 1037 | jaw-droping: [positive] 1038 | jaw-dropping: [positive] 1039 | jollify: [positive] 1040 | jolly: [positive] 1041 | jovial: [positive] 1042 | joy: [positive] 1043 | joyful: [positive] 1044 | joyfully: [positive] 1045 | joyous: [positive] 1046 | joyously: [positive] 1047 | jubilant: [positive] 1048 | jubilantly: [positive] 1049 | jubilate: [positive] 1050 | jubilation: [positive] 1051 | jubiliant: [positive] 1052 | judicious: [positive] 1053 | justly: [positive] 1054 | keen: [positive] 1055 | keenly: [positive] 1056 | keenness: [positive] 1057 | kid-friendly: [positive] 1058 | kindliness: [positive] 1059 | kindly: [positive] 1060 | kindness: [positive] 1061 | knowledgeable: [positive] 1062 | kudos: [positive] 1063 | large-capacity: [positive] 1064 | laud: [positive] 1065 | laudable: [positive] 1066 | laudably: [positive] 1067 | lavish: [positive] 1068 | lavishly: [positive] 1069 | law-abiding: [positive] 1070 | lawful: [positive] 1071 | lawfully: [positive] 1072 | lead: [positive] 1073 | leading: [positive] 1074 | leads: [positive] 1075 | lean: [positive] 1076 | led: [positive] 1077 | legendary: [positive] 1078 | leverage: [positive] 1079 | levity: [positive] 1080 | liberate: [positive] 1081 | liberation: [positive] 1082 | liberty: [positive] 1083 | lifesaver: [positive] 1084 | light-hearted: [positive] 1085 | lighter: [positive] 1086 | likable: [positive] 1087 | like: [positive] 1088 | liked: [positive] 1089 | likes: [positive] 1090 | liking: [positive] 1091 | lionhearted: [positive] 1092 | lively: [positive] 1093 | logical: [positive] 1094 | long-lasting: [positive] 1095 | lovable: [positive] 1096 | lovably: [positive] 1097 | love: [positive] 1098 | loved: [positive] 1099 | loveliness: [positive] 1100 | lovely: [positive] 1101 | lover: [positive] 1102 | loves: [positive] 1103 | loving: [positive] 1104 | low-cost: [positive] 1105 | low-price: [positive] 1106 | low-priced: [positive] 1107 | low-risk: [positive] 1108 | lower-priced: [positive] 1109 | loyal: [positive] 1110 | loyalty: [positive] 1111 | lucid: [positive] 1112 | lucidly: [positive] 1113 | luck: [positive] 1114 | luckier: [positive] 1115 | luckiest: [positive] 1116 | luckiness: [positive] 1117 | lucky: [positive] 1118 | lucrative: [positive] 1119 | luminous: [positive] 1120 | lush: [positive] 1121 | luster: [positive] 1122 | lustrous: [positive] 1123 | luxuriant: [positive] 1124 | luxuriate: [positive] 1125 | luxurious: [positive] 1126 | luxuriously: [positive] 1127 | luxury: [positive] 1128 | lyrical: [positive] 1129 | magic: [positive] 1130 | magical: [positive] 1131 | magnanimous: [positive] 1132 | magnanimously: [positive] 1133 | magnificence: [positive] 1134 | magnificent: [positive] 1135 | magnificently: [positive] 1136 | majestic: [positive] 1137 | majesty: [positive] 1138 | manageable: [positive] 1139 | maneuverable: [positive] 1140 | marvel: [positive] 1141 | marveled: [positive] 1142 | marvelled: [positive] 1143 | marvellous: [positive] 1144 | marvelous: [positive] 1145 | marvelously: [positive] 1146 | marvelousness: [positive] 1147 | marvels: [positive] 1148 | master: [positive] 1149 | masterful: [positive] 1150 | masterfully: [positive] 1151 | masterpiece: [positive] 1152 | masterpieces: [positive] 1153 | masters: 
[positive] 1154 | mastery: [positive] 1155 | matchless: [positive] 1156 | mature: [positive] 1157 | maturely: [positive] 1158 | maturity: [positive] 1159 | meaningful: [positive] 1160 | memorable: [positive] 1161 | merciful: [positive] 1162 | mercifully: [positive] 1163 | mercy: [positive] 1164 | merit: [positive] 1165 | meritorious: [positive] 1166 | merrily: [positive] 1167 | merriment: [positive] 1168 | merriness: [positive] 1169 | merry: [positive] 1170 | mesmerize: [positive] 1171 | mesmerized: [positive] 1172 | mesmerizes: [positive] 1173 | mesmerizing: [positive] 1174 | mesmerizingly: [positive] 1175 | meticulous: [positive] 1176 | meticulously: [positive] 1177 | mightily: [positive] 1178 | mighty: [positive] 1179 | mind-blowing: [positive] 1180 | miracle: [positive] 1181 | miracles: [positive] 1182 | miraculous: [positive] 1183 | miraculously: [positive] 1184 | miraculousness: [positive] 1185 | modern: [positive] 1186 | modest: [positive] 1187 | modesty: [positive] 1188 | momentous: [positive] 1189 | monumental: [positive] 1190 | monumentally: [positive] 1191 | morality: [positive] 1192 | motivated: [positive] 1193 | multi-purpose: [positive] 1194 | navigable: [positive] 1195 | neat: [positive] 1196 | neatest: [positive] 1197 | neatly: [positive] 1198 | nice: [positive] 1199 | nicely: [positive] 1200 | nicer: [positive] 1201 | nicest: [positive] 1202 | nifty: [positive] 1203 | nimble: [positive] 1204 | noble: [positive] 1205 | nobly: [positive] 1206 | noiseless: [positive] 1207 | non-violence: [positive] 1208 | non-violent: [positive] 1209 | notably: [positive] 1210 | noteworthy: [positive] 1211 | nourish: [positive] 1212 | nourishing: [positive] 1213 | nourishment: [positive] 1214 | novelty: [positive] 1215 | nurturing: [positive] 1216 | oasis: [positive] 1217 | obsession: [positive] 1218 | obsessions: [positive] 1219 | obtainable: [positive] 1220 | openly: [positive] 1221 | openness: [positive] 1222 | optimal: [positive] 1223 | optimism: [positive] 1224 | optimistic: [positive] 1225 | opulent: [positive] 1226 | orderly: [positive] 1227 | originality: [positive] 1228 | outdo: [positive] 1229 | outdone: [positive] 1230 | outperform: [positive] 1231 | outperformed: [positive] 1232 | outperforming: [positive] 1233 | outperforms: [positive] 1234 | outshine: [positive] 1235 | outshone: [positive] 1236 | outsmart: [positive] 1237 | outstanding: [positive] 1238 | outstandingly: [positive] 1239 | outstrip: [positive] 1240 | outwit: [positive] 1241 | ovation: [positive] 1242 | overjoyed: [positive] 1243 | overtake: [positive] 1244 | overtaken: [positive] 1245 | overtakes: [positive] 1246 | overtaking: [positive] 1247 | overtook: [positive] 1248 | overture: [positive] 1249 | pain-free: [positive] 1250 | painless: [positive] 1251 | painlessly: [positive] 1252 | palatial: [positive] 1253 | pamper: [positive] 1254 | pampered: [positive] 1255 | pamperedly: [positive] 1256 | pamperedness: [positive] 1257 | pampers: [positive] 1258 | panoramic: [positive] 1259 | paradise: [positive] 1260 | paramount: [positive] 1261 | pardon: [positive] 1262 | party: [positive] 1263 | passion: [positive] 1264 | passionate: [positive] 1265 | passionately: [positive] 1266 | patience: [positive] 1267 | patient: [positive] 1268 | patiently: [positive] 1269 | patriot: [positive] 1270 | patriotic: [positive] 1271 | peace: [positive] 1272 | peaceable: [positive] 1273 | peaceful: [positive] 1274 | peacefully: [positive] 1275 | peacekeepers: [positive] 1276 | peach: [positive] 1277 | peerless: [positive] 1278 | pep: 
[positive] 1279 | pepped: [positive] 1280 | pepping: [positive] 1281 | peppy: [positive] 1282 | peps: [positive] 1283 | perfect: [positive] 1284 | perfection: [positive] 1285 | perfectly: [positive] 1286 | permissible: [positive] 1287 | perseverance: [positive] 1288 | persevere: [positive] 1289 | personages: [positive] 1290 | personalized: [positive] 1291 | phenomenal: [positive] 1292 | phenomenally: [positive] 1293 | picturesque: [positive] 1294 | piety: [positive] 1295 | pinnacle: [positive] 1296 | playful: [positive] 1297 | playfully: [positive] 1298 | pleasant: [positive] 1299 | pleasantly: [positive] 1300 | pleased: [positive] 1301 | pleases: [positive] 1302 | pleasing: [positive] 1303 | pleasingly: [positive] 1304 | pleasurable: [positive] 1305 | pleasurably: [positive] 1306 | pleasure: [positive] 1307 | plentiful: [positive] 1308 | pluses: [positive] 1309 | plush: [positive] 1310 | plusses: [positive] 1311 | poetic: [positive] 1312 | poeticize: [positive] 1313 | poignant: [positive] 1314 | poise: [positive] 1315 | poised: [positive] 1316 | polished: [positive] 1317 | polite: [positive] 1318 | politeness: [positive] 1319 | popular: [positive] 1320 | portable: [positive] 1321 | posh: [positive] 1322 | positive: [positive] 1323 | positively: [positive] 1324 | positives: [positive] 1325 | powerful: [positive] 1326 | powerfully: [positive] 1327 | praise: [positive] 1328 | praiseworthy: [positive] 1329 | praising: [positive] 1330 | pre-eminent: [positive] 1331 | precious: [positive] 1332 | precise: [positive] 1333 | precisely: [positive] 1334 | preeminent: [positive] 1335 | prefer: [positive] 1336 | preferable: [positive] 1337 | preferably: [positive] 1338 | prefered: [positive] 1339 | preferes: [positive] 1340 | preferring: [positive] 1341 | prefers: [positive] 1342 | premier: [positive] 1343 | prestige: [positive] 1344 | prestigious: [positive] 1345 | prettily: [positive] 1346 | pretty: [positive] 1347 | priceless: [positive] 1348 | pride: [positive] 1349 | principled: [positive] 1350 | privilege: [positive] 1351 | privileged: [positive] 1352 | prize: [positive] 1353 | proactive: [positive] 1354 | problem-free: [positive] 1355 | problem-solver: [positive] 1356 | prodigious: [positive] 1357 | prodigiously: [positive] 1358 | prodigy: [positive] 1359 | productive: [positive] 1360 | productively: [positive] 1361 | proficient: [positive] 1362 | proficiently: [positive] 1363 | profound: [positive] 1364 | profoundly: [positive] 1365 | profuse: [positive] 1366 | profusion: [positive] 1367 | progress: [positive] 1368 | progressive: [positive] 1369 | prolific: [positive] 1370 | prominence: [positive] 1371 | prominent: [positive] 1372 | promise: [positive] 1373 | promised: [positive] 1374 | promises: [positive] 1375 | promising: [positive] 1376 | promoter: [positive] 1377 | prompt: [positive] 1378 | promptly: [positive] 1379 | proper: [positive] 1380 | properly: [positive] 1381 | propitious: [positive] 1382 | propitiously: [positive] 1383 | pros: [positive] 1384 | prosper: [positive] 1385 | prosperity: [positive] 1386 | prosperous: [positive] 1387 | prospros: [positive] 1388 | protect: [positive] 1389 | protection: [positive] 1390 | protective: [positive] 1391 | proud: [positive] 1392 | proven: [positive] 1393 | proves: [positive] 1394 | providence: [positive] 1395 | proving: [positive] 1396 | prowess: [positive] 1397 | prudence: [positive] 1398 | prudent: [positive] 1399 | prudently: [positive] 1400 | punctual: [positive] 1401 | pure: [positive] 1402 | purify: [positive] 1403 | purposeful: 
[positive] 1404 | quaint: [positive] 1405 | qualified: [positive] 1406 | qualify: [positive] 1407 | quicker: [positive] 1408 | quiet: [positive] 1409 | quieter: [positive] 1410 | radiance: [positive] 1411 | radiant: [positive] 1412 | rapid: [positive] 1413 | rapport: [positive] 1414 | rapt: [positive] 1415 | rapture: [positive] 1416 | raptureous: [positive] 1417 | raptureously: [positive] 1418 | rapturous: [positive] 1419 | rapturously: [positive] 1420 | rational: [positive] 1421 | razor-sharp: [positive] 1422 | reachable: [positive] 1423 | readable: [positive] 1424 | readily: [positive] 1425 | ready: [positive] 1426 | reaffirm: [positive] 1427 | reaffirmation: [positive] 1428 | realistic: [positive] 1429 | realizable: [positive] 1430 | reasonable: [positive] 1431 | reasonably: [positive] 1432 | reasoned: [positive] 1433 | reassurance: [positive] 1434 | reassure: [positive] 1435 | receptive: [positive] 1436 | reclaim: [positive] 1437 | recomend: [positive] 1438 | recommend: [positive] 1439 | recommendation: [positive] 1440 | recommendations: [positive] 1441 | recommended: [positive] 1442 | reconcile: [positive] 1443 | reconciliation: [positive] 1444 | record-setting: [positive] 1445 | recover: [positive] 1446 | recovery: [positive] 1447 | rectification: [positive] 1448 | rectify: [positive] 1449 | rectifying: [positive] 1450 | redeem: [positive] 1451 | redeeming: [positive] 1452 | redemption: [positive] 1453 | refine: [positive] 1454 | refined: [positive] 1455 | refinement: [positive] 1456 | reform: [positive] 1457 | reformed: [positive] 1458 | reforming: [positive] 1459 | reforms: [positive] 1460 | refresh: [positive] 1461 | refreshed: [positive] 1462 | refreshing: [positive] 1463 | refund: [positive] 1464 | refunded: [positive] 1465 | regal: [positive] 1466 | regally: [positive] 1467 | regard: [positive] 1468 | rejoice: [positive] 1469 | rejoicing: [positive] 1470 | rejoicingly: [positive] 1471 | rejuvenate: [positive] 1472 | rejuvenated: [positive] 1473 | rejuvenating: [positive] 1474 | relaxed: [positive] 1475 | relent: [positive] 1476 | reliable: [positive] 1477 | reliably: [positive] 1478 | relief: [positive] 1479 | relish: [positive] 1480 | remarkable: [positive] 1481 | remarkably: [positive] 1482 | remedy: [positive] 1483 | remission: [positive] 1484 | remunerate: [positive] 1485 | renaissance: [positive] 1486 | renewed: [positive] 1487 | renown: [positive] 1488 | renowned: [positive] 1489 | replaceable: [positive] 1490 | reputable: [positive] 1491 | reputation: [positive] 1492 | resilient: [positive] 1493 | resolute: [positive] 1494 | resound: [positive] 1495 | resounding: [positive] 1496 | resourceful: [positive] 1497 | resourcefulness: [positive] 1498 | respect: [positive] 1499 | respectable: [positive] 1500 | respectful: [positive] 1501 | respectfully: [positive] 1502 | respite: [positive] 1503 | resplendent: [positive] 1504 | responsibly: [positive] 1505 | responsive: [positive] 1506 | restful: [positive] 1507 | restored: [positive] 1508 | restructure: [positive] 1509 | restructured: [positive] 1510 | restructuring: [positive] 1511 | retractable: [positive] 1512 | revel: [positive] 1513 | revelation: [positive] 1514 | revere: [positive] 1515 | reverence: [positive] 1516 | reverent: [positive] 1517 | reverently: [positive] 1518 | revitalize: [positive] 1519 | revival: [positive] 1520 | revive: [positive] 1521 | revives: [positive] 1522 | revolutionary: [positive] 1523 | revolutionize: [positive] 1524 | revolutionized: [positive] 1525 | revolutionizes: [positive] 1526 | reward: 
[positive] 1527 | rewarding: [positive] 1528 | rewardingly: [positive] 1529 | rich: [positive] 1530 | richer: [positive] 1531 | richly: [positive] 1532 | richness: [positive] 1533 | right: [positive] 1534 | righten: [positive] 1535 | righteous: [positive] 1536 | righteously: [positive] 1537 | righteousness: [positive] 1538 | rightful: [positive] 1539 | rightfully: [positive] 1540 | rightly: [positive] 1541 | rightness: [positive] 1542 | risk-free: [positive] 1543 | robust: [positive] 1544 | rock-star: [positive] 1545 | rock-stars: [positive] 1546 | rockstar: [positive] 1547 | rockstars: [positive] 1548 | romantic: [positive] 1549 | romantically: [positive] 1550 | romanticize: [positive] 1551 | roomier: [positive] 1552 | roomy: [positive] 1553 | rosy: [positive] 1554 | safe: [positive] 1555 | safely: [positive] 1556 | sagacity: [positive] 1557 | sagely: [positive] 1558 | saint: [positive] 1559 | saintliness: [positive] 1560 | saintly: [positive] 1561 | salutary: [positive] 1562 | salute: [positive] 1563 | sane: [positive] 1564 | satisfactorily: [positive] 1565 | satisfactory: [positive] 1566 | satisfied: [positive] 1567 | satisfies: [positive] 1568 | satisfy: [positive] 1569 | satisfying: [positive] 1570 | satisified: [positive] 1571 | saver: [positive] 1572 | savings: [positive] 1573 | savior: [positive] 1574 | savvy: [positive] 1575 | scenic: [positive] 1576 | seamless: [positive] 1577 | seasoned: [positive] 1578 | secure: [positive] 1579 | securely: [positive] 1580 | selective: [positive] 1581 | self-determination: [positive] 1582 | self-respect: [positive] 1583 | self-satisfaction: [positive] 1584 | self-sufficiency: [positive] 1585 | self-sufficient: [positive] 1586 | sensation: [positive] 1587 | sensational: [positive] 1588 | sensationally: [positive] 1589 | sensations: [positive] 1590 | sensible: [positive] 1591 | sensibly: [positive] 1592 | sensitive: [positive] 1593 | serene: [positive] 1594 | serenity: [positive] 1595 | sexy: [positive] 1596 | sharp: [positive] 1597 | sharper: [positive] 1598 | sharpest: [positive] 1599 | shimmering: [positive] 1600 | shimmeringly: [positive] 1601 | shine: [positive] 1602 | shiny: [positive] 1603 | significant: [positive] 1604 | silent: [positive] 1605 | simpler: [positive] 1606 | simplest: [positive] 1607 | simplified: [positive] 1608 | simplifies: [positive] 1609 | simplify: [positive] 1610 | simplifying: [positive] 1611 | sincere: [positive] 1612 | sincerely: [positive] 1613 | sincerity: [positive] 1614 | skill: [positive] 1615 | skilled: [positive] 1616 | skillful: [positive] 1617 | skillfully: [positive] 1618 | slammin: [positive] 1619 | sleek: [positive] 1620 | slick: [positive] 1621 | smart: [positive] 1622 | smarter: [positive] 1623 | smartest: [positive] 1624 | smartly: [positive] 1625 | smile: [positive] 1626 | smiles: [positive] 1627 | smiling: [positive] 1628 | smilingly: [positive] 1629 | smitten: [positive] 1630 | smooth: [positive] 1631 | smoother: [positive] 1632 | smoothes: [positive] 1633 | smoothest: [positive] 1634 | smoothly: [positive] 1635 | snappy: [positive] 1636 | snazzy: [positive] 1637 | sociable: [positive] 1638 | soft: [positive] 1639 | softer: [positive] 1640 | solace: [positive] 1641 | solicitous: [positive] 1642 | solicitously: [positive] 1643 | solid: [positive] 1644 | solidarity: [positive] 1645 | soothe: [positive] 1646 | soothingly: [positive] 1647 | sophisticated: [positive] 1648 | soulful: [positive] 1649 | soundly: [positive] 1650 | soundness: [positive] 1651 | spacious: [positive] 1652 | sparkle: [positive] 
1653 | sparkling: [positive] 1654 | spectacular: [positive] 1655 | spectacularly: [positive] 1656 | speedily: [positive] 1657 | speedy: [positive] 1658 | spellbind: [positive] 1659 | spellbinding: [positive] 1660 | spellbindingly: [positive] 1661 | spellbound: [positive] 1662 | spirited: [positive] 1663 | spiritual: [positive] 1664 | splendid: [positive] 1665 | splendidly: [positive] 1666 | splendor: [positive] 1667 | spontaneous: [positive] 1668 | sporty: [positive] 1669 | spotless: [positive] 1670 | sprightly: [positive] 1671 | stability: [positive] 1672 | stabilize: [positive] 1673 | stable: [positive] 1674 | stainless: [positive] 1675 | standout: [positive] 1676 | state-of-the-art: [positive] 1677 | stately: [positive] 1678 | statuesque: [positive] 1679 | staunch: [positive] 1680 | staunchly: [positive] 1681 | staunchness: [positive] 1682 | steadfast: [positive] 1683 | steadfastly: [positive] 1684 | steadfastness: [positive] 1685 | steadiest: [positive] 1686 | steadiness: [positive] 1687 | steady: [positive] 1688 | stellar: [positive] 1689 | stellarly: [positive] 1690 | stimulate: [positive] 1691 | stimulates: [positive] 1692 | stimulating: [positive] 1693 | stimulative: [positive] 1694 | stirringly: [positive] 1695 | straighten: [positive] 1696 | straightforward: [positive] 1697 | streamlined: [positive] 1698 | striking: [positive] 1699 | strikingly: [positive] 1700 | striving: [positive] 1701 | strong: [positive] 1702 | stronger: [positive] 1703 | strongest: [positive] 1704 | stunned: [positive] 1705 | stunning: [positive] 1706 | stunningly: [positive] 1707 | stupendous: [positive] 1708 | stupendously: [positive] 1709 | sturdier: [positive] 1710 | sturdy: [positive] 1711 | stylish: [positive] 1712 | stylishly: [positive] 1713 | stylized: [positive] 1714 | suave: [positive] 1715 | suavely: [positive] 1716 | sublime: [positive] 1717 | subsidize: [positive] 1718 | subsidized: [positive] 1719 | subsidizes: [positive] 1720 | subsidizing: [positive] 1721 | substantive: [positive] 1722 | succeed: [positive] 1723 | succeeded: [positive] 1724 | succeeding: [positive] 1725 | succeeds: [positive] 1726 | succes: [positive] 1727 | success: [positive] 1728 | successes: [positive] 1729 | successful: [positive] 1730 | successfully: [positive] 1731 | suffice: [positive] 1732 | sufficed: [positive] 1733 | suffices: [positive] 1734 | sufficient: [positive] 1735 | sufficiently: [positive] 1736 | suitable: [positive] 1737 | sumptuous: [positive] 1738 | sumptuously: [positive] 1739 | sumptuousness: [positive] 1740 | super: [positive] 1741 | superb: [positive] 1742 | superbly: [positive] 1743 | superior: [positive] 1744 | superiority: [positive] 1745 | supple: [positive] 1746 | support: [positive] 1747 | supported: [positive] 1748 | supporter: [positive] 1749 | supporting: [positive] 1750 | supportive: [positive] 1751 | supports: [positive] 1752 | supremacy: [positive] 1753 | supreme: [positive] 1754 | supremely: [positive] 1755 | supurb: [positive] 1756 | supurbly: [positive] 1757 | surmount: [positive] 1758 | surpass: [positive] 1759 | surreal: [positive] 1760 | survival: [positive] 1761 | survivor: [positive] 1762 | sustainability: [positive] 1763 | sustainable: [positive] 1764 | swank: [positive] 1765 | swankier: [positive] 1766 | swankiest: [positive] 1767 | swanky: [positive] 1768 | sweeping: [positive] 1769 | sweet: [positive] 1770 | sweeten: [positive] 1771 | sweetheart: [positive] 1772 | sweetly: [positive] 1773 | sweetness: [positive] 1774 | swift: [positive] 1775 | swiftness: [positive] 1776 | 
talent: [positive] 1777 | talented: [positive] 1778 | talents: [positive] 1779 | tantalize: [positive] 1780 | tantalizing: [positive] 1781 | tantalizingly: [positive] 1782 | tempt: [positive] 1783 | tempting: [positive] 1784 | temptingly: [positive] 1785 | tenacious: [positive] 1786 | tenaciously: [positive] 1787 | tenacity: [positive] 1788 | tender: [positive] 1789 | tenderly: [positive] 1790 | terrific: [positive] 1791 | terrifically: [positive] 1792 | thank: [positive] 1793 | thankful: [positive] 1794 | thinner: [positive] 1795 | thoughtful: [positive] 1796 | thoughtfully: [positive] 1797 | thoughtfulness: [positive] 1798 | thrift: [positive] 1799 | thrifty: [positive] 1800 | thrill: [positive] 1801 | thrilled: [positive] 1802 | thrilling: [positive] 1803 | thrillingly: [positive] 1804 | thrills: [positive] 1805 | thrive: [positive] 1806 | thriving: [positive] 1807 | thumb-up: [positive] 1808 | thumbs-up: [positive] 1809 | tickle: [positive] 1810 | tidy: [positive] 1811 | time-honored: [positive] 1812 | timely: [positive] 1813 | tingle: [positive] 1814 | titillate: [positive] 1815 | titillating: [positive] 1816 | titillatingly: [positive] 1817 | togetherness: [positive] 1818 | tolerable: [positive] 1819 | toll-free: [positive] 1820 | top: [positive] 1821 | top-notch: [positive] 1822 | top-quality: [positive] 1823 | topnotch: [positive] 1824 | tops: [positive] 1825 | tough: [positive] 1826 | tougher: [positive] 1827 | toughest: [positive] 1828 | traction: [positive] 1829 | tranquil: [positive] 1830 | tranquility: [positive] 1831 | transparent: [positive] 1832 | treasure: [positive] 1833 | tremendously: [positive] 1834 | trendy: [positive] 1835 | triumph: [positive] 1836 | triumphal: [positive] 1837 | triumphant: [positive] 1838 | triumphantly: [positive] 1839 | trivially: [positive] 1840 | trophy: [positive] 1841 | trouble-free: [positive] 1842 | trump: [positive] 1843 | trumpet: [positive] 1844 | trust: [positive] 1845 | trusted: [positive] 1846 | trusting: [positive] 1847 | trustingly: [positive] 1848 | trustworthiness: [positive] 1849 | trustworthy: [positive] 1850 | trusty: [positive] 1851 | truthful: [positive] 1852 | truthfully: [positive] 1853 | truthfulness: [positive] 1854 | twinkly: [positive] 1855 | ultra-crisp: [positive] 1856 | unabashed: [positive] 1857 | unabashedly: [positive] 1858 | unaffected: [positive] 1859 | unassailable: [positive] 1860 | unbeatable: [positive] 1861 | unbiased: [positive] 1862 | unbound: [positive] 1863 | uncomplicated: [positive] 1864 | unconditional: [positive] 1865 | undamaged: [positive] 1866 | undaunted: [positive] 1867 | understandable: [positive] 1868 | undisputable: [positive] 1869 | undisputably: [positive] 1870 | undisputed: [positive] 1871 | unencumbered: [positive] 1872 | unequivocal: [positive] 1873 | unequivocally: [positive] 1874 | unfazed: [positive] 1875 | unfettered: [positive] 1876 | unforgettable: [positive] 1877 | unity: [positive] 1878 | unlimited: [positive] 1879 | unmatched: [positive] 1880 | unparalleled: [positive] 1881 | unquestionable: [positive] 1882 | unquestionably: [positive] 1883 | unreal: [positive] 1884 | unrestricted: [positive] 1885 | unrivaled: [positive] 1886 | unselfish: [positive] 1887 | unwavering: [positive] 1888 | upbeat: [positive] 1889 | upgradable: [positive] 1890 | upgradeable: [positive] 1891 | upgraded: [positive] 1892 | upheld: [positive] 1893 | uphold: [positive] 1894 | uplift: [positive] 1895 | uplifting: [positive] 1896 | upliftingly: [positive] 1897 | upliftment: [positive] 1898 | upscale: 
[positive] 1899 | usable: [positive] 1900 | useable: [positive] 1901 | useful: [positive] 1902 | user-friendly: [positive] 1903 | user-replaceable: [positive] 1904 | valiant: [positive] 1905 | valiantly: [positive] 1906 | valor: [positive] 1907 | valuable: [positive] 1908 | variety: [positive] 1909 | venerate: [positive] 1910 | verifiable: [positive] 1911 | veritable: [positive] 1912 | versatile: [positive] 1913 | versatility: [positive] 1914 | vibrant: [positive] 1915 | vibrantly: [positive] 1916 | victorious: [positive] 1917 | victory: [positive] 1918 | viewable: [positive] 1919 | vigilance: [positive] 1920 | vigilant: [positive] 1921 | virtue: [positive] 1922 | virtuous: [positive] 1923 | virtuously: [positive] 1924 | visionary: [positive] 1925 | vivacious: [positive] 1926 | vivid: [positive] 1927 | vouch: [positive] 1928 | vouchsafe: [positive] 1929 | warm: [positive] 1930 | warmer: [positive] 1931 | warmhearted: [positive] 1932 | warmly: [positive] 1933 | warmth: [positive] 1934 | wealthy: [positive] 1935 | welcome: [positive] 1936 | welcoming: [positive] 1937 | well: [positive] 1938 | well-backlit: [positive] 1939 | well-balanced: [positive] 1940 | well-behaved: [positive] 1941 | well-being: [positive] 1942 | well-bred: [positive] 1943 | well-connected: [positive] 1944 | well-educated: [positive] 1945 | well-established: [positive] 1946 | well-informed: [positive] 1947 | well-intentioned: [positive] 1948 | well-known: [positive] 1949 | well-made: [positive] 1950 | well-managed: [positive] 1951 | well-mannered: [positive] 1952 | well-positioned: [positive] 1953 | well-received: [positive] 1954 | well-regarded: [positive] 1955 | well-rounded: [positive] 1956 | well-run: [positive] 1957 | well-wishers: [positive] 1958 | wellbeing: [positive] 1959 | whoa: [positive] 1960 | wholeheartedly: [positive] 1961 | wholesome: [positive] 1962 | whooa: [positive] 1963 | whoooa: [positive] 1964 | wieldy: [positive] 1965 | willing: [positive] 1966 | willingly: [positive] 1967 | willingness: [positive] 1968 | win: [positive] 1969 | windfall: [positive] 1970 | winnable: [positive] 1971 | winner: [positive] 1972 | winners: [positive] 1973 | winning: [positive] 1974 | wins: [positive] 1975 | wisdom: [positive] 1976 | wise: [positive] 1977 | wisely: [positive] 1978 | witty: [positive] 1979 | won: [positive] 1980 | wonder: [positive] 1981 | wonderful: [positive] 1982 | wonderfully: [positive] 1983 | wonderous: [positive] 1984 | wonderously: [positive] 1985 | wonders: [positive] 1986 | wondrous: [positive] 1987 | woo: [positive] 1988 | work: [positive] 1989 | workable: [positive] 1990 | worked: [positive] 1991 | works: [positive] 1992 | world-famous: [positive] 1993 | worth: [positive] 1994 | worth-while: [positive] 1995 | worthiness: [positive] 1996 | worthwhile: [positive] 1997 | worthy: [positive] 1998 | wow: [positive] 1999 | wowed: [positive] 2000 | wowing: [positive] 2001 | wows: [positive] 2002 | yay: [positive] 2003 | youthful: [positive] 2004 | zeal: [positive] 2005 | zenith: [positive] 2006 | zest: [positive] 2007 | zippy: [positive] 2008 | --------------------------------------------------------------------------------
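The four dictionaries above (Positive.yml plus the Increasing, Decreasing, and Inverting modifier lists shown earlier) are the data behind the repo's lexicon-based scoring. What follows is a minimal sketch of how they might be combined; it is not the repository's actual Lexical.py logic, and the function names and the double/half/negate scoring rule are illustrative assumptions.

```python
# Minimal sketch (assumed, not the repo's Lexical.py): load the YAML
# dictionaries and score a sentence with simple modifier handling.
import yaml  # assumes PyYAML is installed: pip install pyyaml


def load_dict(path):
    """Load a YAML dictionary such as Positive.yml ({word: [tag], ...})."""
    with open(path) as f:
        return yaml.safe_load(f) or {}


def score_sentence(sentence, positive, increasing, decreasing, inverting):
    """Illustrative scoring rule: +1 per positive word, doubled or halved
    by a preceding increaser/decreaser, sign-flipped by a preceding
    inverter such as 'not'. Multi-word entries like 'lack of' are
    ignored by this token-wise check."""
    words = sentence.lower().split()
    score = 0.0
    for i, w in enumerate(words):
        if w not in positive:
            continue
        value = 1.0
        prev = words[i - 1] if i > 0 else None
        if prev in increasing:
            value *= 2.0
        elif prev in decreasing:
            value *= 0.5
        if prev in inverting:
            value = -value
        score += value
    return score


if __name__ == "__main__":
    positive = load_dict("Dicts/Positive.yml")
    increasing = load_dict("Dicts/Increasing.yml")
    decreasing = load_dict("Dicts/Decreasing.yml")
    inverting = load_dict("Dicts/Inverting.yml")
    # "very" is an increaser, so "good" counts double: 2.0
    print(score_sentence("the plan was very good",
                         positive, increasing, decreasing, inverting))
    # "not" inverts the positive hit: -1.0
    print(score_sentence("not good at all",
                         positive, increasing, decreasing, inverting))
```

Under this assumed scheme a negative lexicon and the threat words from threatWords.yml would be scored the same way with opposite or weighted signs; how the repo actually weights them would have to be read from Lexical.py itself.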