51 | Please activate JavaScript to enable the search
52 | functionality.
53 |
54 |
55 |
56 | From here you can search these documents. Enter your search
57 | words into the box below and click "search". Note that the search
58 | function will automatically search for all of the words. Pages
59 | containing fewer words won't appear in the result list.
60 |
103 | Enter search terms or a module, class or function name.
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
119 |
120 |
121 |
122 |
123 |
124 |
--------------------------------------------------------------------------------
/watsongraph/node.py:
--------------------------------------------------------------------------------
1 | from mwviews.api import PageviewsClient
2 | import hashlib
3 | import watsongraph.event_insight_lib
4 |
5 |
6 | class Node:
7 | """
8 | The IBM Watson Concept Insights API is based on a weighted graph of Wikipedia pages. Each vertex in the graph
9 | corresponds to an individual article, referred to as a "node" or "label". This `Node` object class abstracts these
10 | nodes. They are the basis of the `ConceptModel` object.
11 |
12 | Nodes are internal representations which are never exposed to the end user. Developers work with Nodes only
13 | through the overall `ConceptModel`.
14 | """
15 |
16 | """
17 | The name of the Wikipedia article associated with the node, e.g. Apple, Apple Inc., Nirvana (band), etc.
18 | "Label" is the terminology used by the IBM Watson API for this attribute; "Concept" is the terminology used
19 | instead by this library (we are building a `ConceptModel()` not a `LabelModel()`!).
20 | """
21 | concept = ""
22 |
23 | """
24 | A dictionary of arbitrary parameter:value tuples. `view_count` and `relevance` are two such parameters which
25 | have baked-in support, but the point of this abstraction is that the user ought to be able to extend the data saved
26 | in the ConceptModel object however they want to.
27 | """
28 | properties = None
29 |
30 | def __init__(self, concept, **kwargs):
31 | """
32 | :param concept: The concept that is being wrapped by the Node. In Concept Insight terminology this concept
33 | is known as the "label", since it is expected to correspond one-to-one with the exact name of a Wikipedia
34 | article in Watson's `en-20120601` graph. For example, Apple Inc. is a valid concept/label while Apple (
35 | company) is not.
36 |
37 | :param kwargs: A list of property:value tuples to be passed to the `properties` parameter.
38 | """
39 | self.concept = concept
40 | self.properties = kwargs
41 | if not self.properties:
42 | self.properties = dict()
43 |
44 | def __eq__(self, other):
45 | """
46 | Two `Node` objects are equal when their `concept` attributes are the same string.
47 | """
48 | if self and other:
49 | return self.concept == other.concept
50 | else:
51 | return False
52 |
53 | def __hash__(self):
54 | """
55 | Two concepts have an equivalent hash if their labels are equivalent. Comparison-by-hash is overwritten this
56 | way to support `nx.compose()` as used in `conceptmodel.merge_with()`.
57 | """
58 | return int(hashlib.md5(self.concept.encode()).hexdigest(), 16)
59 |
60 | def set_view_count(self):
61 | """
62 | Sets the view_count parameter appropriately, using a 30-day average.
63 | """
64 | p = PageviewsClient().article_views("en.wikipedia", [self.concept.replace(' ', '_')])
65 | p = [p[key][self.concept.replace(' ', '_')] for key in p.keys()]
66 | p = int(sum([daily_view_count for daily_view_count in p if daily_view_count])/len(p))
67 | # self.view_count = p
68 | self.properties['view_count'] = p
69 | print(self.properties['view_count'])
70 |
71 | def set_relevance(self, relevance):
72 | """
73 | :param relevance: Sets the concept's relevance parameter.
74 | """
75 | # self.relevance = relevance
76 | self.set_property('relevance', relevance)
77 |
78 | def get_relevance(self):
79 | """
80 | :return: The concept's relevance parameter.
81 | """
82 | return self.get_property('relevance')
83 |
84 | def set_property(self, prop, value):
85 | """
86 | :param prop: The property to be stored.
87 | :param value: The value being stored.
88 | """
89 | self.properties.update({prop: value})
90 |
91 | def get_property(self, prop):
92 | """
93 | :param prop: The property to be retrieved.
94 | """
95 | return self.properties[prop]
96 |
97 |
98 | def conceptualize(user_input):
99 | """
100 | Attempts to map arbitrary textual input to a valid Concept. If the method is unsuccessful no Concept is
101 | returned. See also the similar `conceptmodel.model` static method, which binds arbitrary input to an entire
102 | ConceptModel instead.
103 |
104 | :param user_input: Arbitrary input, be it a name (e.g. Apple (company) -> Apple Inc.) or a text string (e.g.
105 | "the iPhone 5C, released this Thursday..." -> iPhone).
106 | """
107 | # Fetch the precise name of the node (article title) associated with the institution.
108 | raw_concepts = watsongraph.event_insight_lib.annotate_text(user_input)
109 | # If the correction call is successful, keep going.
110 | if 'annotations' in raw_concepts.keys() and len(raw_concepts['annotations']) != 0:
111 | matched_concept_node_label = raw_concepts['annotations'][0]['concept']['label']
112 | return matched_concept_node_label
113 |
--------------------------------------------------------------------------------
/watsongraph/item.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from watsongraph.conceptmodel import ConceptModel
4 | from watsongraph.conceptmodel import model as model_input
5 |
6 | # Every augmentation is the ConceptModel of a new user-indicated Item of interest. I have to come up with some sort
7 | # of mathematically justified way of merging this new Item into the old model: decaying the old nodes and reinforcing
8 | # the overlap.
9 | # Idea: every iteration the existing non-overlapping nodes lose 1/10 of their current relevance. Newly added nodes
10 | # come in at high relevance (.9?). Overlapping elements gain half of the distance between their sum and 1.
11 |
12 | # TODO: compare() method for measuring the overlap between two items.
13 |
14 |
15 | class Item:
16 | """
17 | The Item object is a generic container for the objects that the application is trying to recommend to its users.
18 | """
19 | description = ""
20 | name = ""
21 | model = None
22 |
23 | def __init__(self, name="", description=""):
24 | """
25 | Loads an `Item` object from its description and an associated name.
26 |
27 | :param name: The Item's name.
28 |
29 | :param description: A textual description of what the Item is about or describes. This is mined at
30 | initialization for the concepts which are associated with this Item's ConceptModel().
31 |
32 | """
33 | self.name = name
34 | self.description = description
35 | if len(description) > 0:
36 | self.model = model_input(description)
37 | else:
38 | self.model = ConceptModel()
39 |
40 | def nodes(self):
41 | """
42 | :return: The nodes in the Item model.
43 | """
44 | return self.model.nodes()
45 |
46 | def concepts(self):
47 | """
48 | :return: The concepts in the Item model.
49 | """
50 | return self.model.concepts()
51 |
52 | def relevancies(self):
53 | """
54 | :return: Sorted (relevance, concept) pairs associated with the Item.
55 | """
56 | return sorted([("{0:.3f}".format(node.properties['relevance']), node.concept) for node in self.nodes()],
57 | reverse=True)
58 |
59 | def to_json(self):
60 | """
61 | Returns a JSON of the Item object suitable for storage. Counter-operation to `load_from_json()`.
62 |
63 | :return: A JSON serialization of the Item object which is suitable for storage.
64 | """
65 | return {
66 | "name": self.name,
67 | "model": self.model.to_json(),
68 | "description": self.description,
69 | }
70 |
71 | def load_from_json(self, data):
72 | """
73 | Loads an Item from its JSON serialization. Counter-operation to `to_json()`.
74 |
75 | :param data: The JSON data being loaded into an Item object.
76 | """
77 | self.model.load_from_json(data['model'])
78 | self.description = data['description']
79 |
80 | def save(self, filename='items.json'):
81 | """
82 | Saves the Item to a JSON representation.
83 |
84 | :param filename: The filename for the items storage file; `items.json` is the default.
85 | """
86 | item_schema = self.to_json()
87 | if filename not in [f for f in os.listdir('.') if os.path.isfile(f)]:
88 | new_file_schema = {
89 | "items":
90 | [item_schema]
91 | }
92 | f = open(filename, 'w')
93 | f.write(json.dumps(new_file_schema, indent=4))
94 | f.close()
95 | else:
96 | data = json.load(open(filename))
97 | names = [item['name'] for item in data['items']]
98 | if self.name not in names:
99 | data['items'].append(item_schema)
100 | with open(filename, 'w') as outfile:
101 | json.dump(data, outfile, indent=4)
102 | if self.name in names:
103 | user_index = 0
104 | for i in range(0, len(data['items'])):
105 | if data['items'][i]['name'] == self.name:
106 | user_index = i
107 | break
108 | data['items'][user_index] = item_schema
109 | with open(filename, 'w') as outfile:
110 | json.dump(data, outfile, indent=4)
111 |
112 | def load(self, filename="items.json"):
113 | """
114 | Loads the Item from a JSON representation.
115 |
116 | :param filename: The filename for the items storage file; `items.json` is the default.
117 | """
118 | if filename not in [f for f in os.listdir('.') if os.path.isfile(f)]:
119 | raise IOError("The item definitions file" + filename + " appears to be missing!")
120 | list_of_items = json.load(open(filename))['items']
121 | for item in list_of_items:
122 | if item['name'] != self:
123 | continue
124 | else:
125 | self.load_from_json(item)
126 |
--------------------------------------------------------------------------------
/docs/_build/html/searchindex.js:
--------------------------------------------------------------------------------
1 | Search.setIndex({envversion:47,filenames:["index"],objects:{"":{conceptmodel:[0,0,0,"-"],item:[0,0,0,"-"],user:[0,0,0,"-"]},"conceptmodel.ConceptModel":{"__init__":[0,1,1,""],abridge:[0,1,1,""],add:[0,1,1,""],add_edge:[0,1,1,""],add_edges:[0,1,1,""],augment:[0,1,1,""],concepts:[0,1,1,""],concepts_by_property:[0,1,1,""],concepts_by_view_count:[0,1,1,""],copy:[0,1,1,""],edges:[0,1,1,""],expand:[0,1,1,""],explode:[0,1,1,""],explode_edges:[0,1,1,""],get_view_count:[0,1,1,""],load_from_json:[0,1,1,""],map_property:[0,1,1,""],merge_with:[0,1,1,""],neighborhood:[0,1,1,""],remove:[0,1,1,""],set_property:[0,1,1,""],set_view_counts:[0,1,1,""],to_json:[0,1,1,""]},"item.Item":{"__init__":[0,1,1,""],concepts:[0,1,1,""],load:[0,1,1,""],load_from_json:[0,1,1,""],relevancies:[0,1,1,""],save:[0,1,1,""],to_json:[0,1,1,""]},"user.User":{"__init__":[0,1,1,""],concepts:[0,1,1,""],delete_user:[0,1,1,""],express_disinterest:[0,1,1,""],express_interest:[0,1,1,""],get_best_item:[0,1,1,""],input_interest:[0,1,1,""],input_interests:[0,1,1,""],interest_in:[0,1,1,""],interests:[0,1,1,""],load_user:[0,1,1,""],save_user:[0,1,1,""],update_user_credentials:[0,1,1,""]},conceptmodel:{ConceptModel:[0,2,1,""]},item:{Item:[0,2,1,""]},user:{User:[0,2,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","method","Python method"],"2":["py","class","Python class"]},objtypes:{"0":"py:module","1":"py:method","2":"py:class"},terms:{"0x00000000042ec240":[],"0x00000000043e7cc0":[],"0x00000000060a73c8":[],"0x00000000060a9a90":[],"0x00000000060b8588":[],"0x0000000006107b00":[],"0x000000000619a780":[],"0x00000000061b2f98":[],"0x00000000061c5f98":[],"0x00000000061d1dd8":[],"0x00000000061d2438":[],"0x00000000061f0630":[],"__hash__":0,"__init__":0,"abstract":0,"case":0,"class":0,"default":0,"float":0,"function":0,"int":0,"return":0,"throw":0,"true":0,"try":0,"while":0,about:0,abov:[],abridg:0,abridge_by_nod:0,access:[],account:0,act:0,adapt:[],add:0,add_edg:0,adder:0,addit:[],against:[],all:0,alon:0,alreadi:0,also:0,altern:[],analyz:[],ani:0,annot:[],annotate_text:[],anoth:0,api:0,appl:0,applic:0,arbitrari:0,around:0,articl:0,assign:0,associ:0,augment:0,augment_by_nod:0,auto:0,autoclass:[],averag:[],awfjwajfalwnfa:[],back:0,base:0,basic:0,been:0,best:0,between:0,bind:0,bluemix:[],both:0,broadest:0,cachet:0,call:0,can:0,caus:0,chang:[],check:[],chosen:0,cognit:[],compar:0,comparison:0,complet:[],compos:0,concept:0,conceptmodel:0,concepts_by_properti:0,concepts_by_view_count:0,conceptu:[],connect:0,consid:0,construct:[],contain:0,content:0,content_typ:[],convert_concept_to_dict:[],copi:0,core:0,correl:0,correspond:0,could:[],count:[],counter:0,creat:0,credenti:[],current:0,cutoff:0,data:0,data_repr:0,debug:[],deep:0,delet:0,delete_us:0,depth:0,describ:0,descript:0,determin:0,develop:[],dictionari:0,differ:0,direct:[],directli:0,discov:0,disinterest:0,distort:0,doe:0,don:0,drawn:0,each:0,edg:0,either:0,els:[],empti:0,enough:0,entir:0,equal:0,error:0,etc:[],even:0,event:0,event_insight_lib:[],everi:0,everyth:0,examin:0,exampl:0,except:0,execut:[],exist:[],expand:0,expans:0,expect:0,experiment:[],expir:[],explicitli:0,explod:0,explode_edg:0,explos:[],express:0,express_disinterest:0,express_interest:0,extend:[],extens:0,extern:0,face:0,fail:0,fals:0,fast:[],fblwkaf:[],fetch:[],field:[],file:0,filenam:0,fine:0,focu:0,focus:0,fold:0,found:0,from:0,func:0,gener:0,generatetoken:[],genindex:[],get:0,get_best_item:0,get_nod:[],get_related_concept:[],get_relation_scor:[],get_token:[],get_view_count:0,giant:[],given:0,graph:0,hash:0,have:0,here:0,higher:0,highest:0,hour:[],html:[],human:0,hypothes:0,ibm:0,implement:[],inc:0,incur:[],index:0,individu:[],inform:0,init:[],initi:0,input:0,input_interest:0,insight:[],interest:0,interest_in:0,intern:0,intersect:0,intersection_with:[],intersection_with_by_nod:[],invers:0,issu:0,item:0,item_list:0,itself:0,jan:[],json:0,just:[],know:0,label:0,label_search:[],lack:0,larg:0,least:[],less:0,level:0,librari:0,like:0,limit:0,list:0,list_of_concept:0,list_of_target_concept:0,list_of_target_label:[],load:0,load_from_dict:0,load_from_json:0,load_us:0,local:[],lonely_concept:0,made:[],make:[],manipul:0,map:0,map_properti:0,master:[],match:0,maxdepth:[],meant:[],member:[],merg:0,merge_with:0,method:0,methodolog:[],microsoft:0,might:0,mine:0,minimum:0,mixin_concept_model:0,model:0,modindex:[],modul:0,more:0,most:0,much:0,must:[],name:0,need:0,neighbor:0,neighborhood:0,network:[],networkx:[],newli:0,node:0,none:0,note:0,now:[],number:0,oawbf:[],object:0,onc:0,onli:0,oper:0,option:[],order:0,other:0,otherwis:0,out:0,output:0,overhead:[],overlap:[],overwritten:0,own:0,page:0,pair:0,param:0,paramet:0,particular:0,pass:0,password:0,perform:0,place:0,plain:[],plaintext:[],plu:0,point:0,popular:0,precis:[],prefer:0,present:0,pretti:[],previou:[],primari:[],print:[],print_concept:[],print_edg:[],probabilist:[],process:[],prop:0,properti:0,provid:0,prune:0,python:[],queri:[],quickstart:[],rang:[],rate:0,raw:[],recip:[],recommend:0,reconstruct:[],redund:[],ref:[],relat:0,relation_scor:[],relev:0,relevance_edg:0,remov:0,repeatedli:[],repres:[],represent:0,requir:[],resolv:0,result:0,retriev:0,reus:[],root:[],same:0,save:0,save_us:0,scale:0,score:[],search:0,see:0,self:0,seper:0,seri:0,serial:0,servic:0,set:0,set_properti:0,set_view_count:0,should:[],simpl:0,singl:0,sizabl:0,slow:0,slower:[],sort:0,sourc:0,source_concept:0,sphinx:[],stall:[],standalon:[],still:[],storag:0,store:0,strength:0,string:0,submethod:[],suffici:0,suitabl:0,supposedli:0,system:0,take:0,talk:[],target:0,target_concept:0,team:[],terminolog:[],test:[],text:[],textual:0,than:0,thei:0,them:0,thi:0,though:0,throughout:[],to_dict:0,to_json:0,toctre:[],token:[],token_fil:[],total:0,tue:[],tupl:0,turn:0,uniqu:0,unnecessari:[],unprocess:[],updat:0,update_user_credenti:0,upon:0,user:0,user_id:0,valid:[],validatetoken:[],valu:0,veri:0,version:0,vertic:[],view:0,view_count:0,wai:0,want:[],warn:0,watson:0,weight:[],what:0,whatev:[],when:0,which:0,whom:[],whose:0,why:[],widest:0,wikipedia:0,within:0,without:0,work:0,would:[],wrap:[],wrapper:0,you:0,your:[]},titles:["Welcome to watsongraph’s documentation!"],titleterms:{document:0,indic:0,tabl:0,watsongraph:0,welcom:0}})
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](http://watsongraph.readthedocs.org/en/latest/?badge=latest)
2 | [](https://pypi.python.org/pypi/watsongraph/)
3 |
4 | # watsongraph
5 |
6 | 
7 |
8 | `watsongraph` is a concept discovery, graphing, and processing library written in `Python 3`. The library's
9 | core facility is the `ConceptModel` object, a conceptual graph constructed out of the individual concept nodes
10 | associated with labels from the IBM Watson `wikipedia/en-20120601` Wikipedia-derived conceptual graph. This graph is
11 | queried using the [Concept Insights API](http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/concept-insights.html)
12 | and then reconstructed locally as a `networkx`-based weighted conceptual graph:
13 |
14 | ```
15 | >>> from watsongraph.conceptmodel import ConceptModel
16 | >>> ibm = ConceptModel(['IBM'])
17 | >>> ibm.explode()
18 | >>> ibm.concepts()
19 | ['.NET Framework', 'ARM architecture', 'Advanced Micro Devices', ...]
20 | >>> len(ibm.concepts())
21 | 37
22 | >>> 'Server (computing)' in ibm.concepts()
23 | True
24 | >>> ibm.augment('Server (computing)')
25 | >>> len(ibm.concepts())
26 | 58
27 | >>> ibm.edges()
28 | [(0.89564085, 'IBM', 'Digital Equipment Corporation'),
29 | (0.8793883, 'Solaris (operating system)', 'Server (computing)'),
30 | ...
31 | ]
32 |
33 | ```
34 |
35 | The `ConceptModel` can then be associated with any number of applications. Basic bindings are provided, in
36 | particular, for a recommendation service using library-provided `Item` and `User` classes:
37 |
38 | ```
39 | >>> from watsongraph.user import User
40 | >>> from watsongraph.item import Item
41 | >>> Bob = User(user_id="Bob")
42 | >>> Bob.input_interests(["Data science", "Machine learning", "Big data", "Cognitive science"])
43 | >>> meetup = Item("Meetup", "This is a description of a pretty awesome event...")
44 | >>> relay = Item("Relay", "This is a description of another pretty awesome event...")
45 | >>> Bob.interest_in(meetup)
46 | 1.633861635
47 | >>> Bob.interest_in(relay)
48 | 1.54593405
49 | # Update the "Bob" model to account for our new information on Bob's preferences.
50 | >>> Bob.express_interest(meetup)
51 | ```
52 |
53 | ## Setup
54 |
55 | `watsongraph` is [available on PyPi](https://pypi.python.org/pypi/watsongraph/) and can be downloaded locally with `pip
56 | install watsongraph`.
57 |
58 | However, in order to use IBM Watson cognitive APIs you **must** first register an account on
59 | [IBM Bluemix](https://console.ng.bluemix.net/). If you do not
60 | have an account already you may [register](https://console.ng.bluemix.net/registration/) for a free trial account.
61 |
62 | Once you are logged in, enter the catalog, scroll down to the "IBM Watson" section, and click through to create an
63 | instance of the
64 | [Concept Insights](http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/concept-insights.html) service. Go
65 | back to the dashboard, click on the newly populated service, and click through to "Service Credentials" on the
66 | sidebar to get your service credentials: copy-paste this `json` output and save it locally as
67 | `concept_insight_credentials.json`. Your credentials should look like this:
68 |
69 | ```
70 | {
71 | "credentials": {
72 | "url": "https://gateway.watsonplatform.net/concept-insights/api",
73 | "username": "........-....-....-....-............",
74 | "password": "............"
75 | }
76 | }
77 | ```
78 |
79 | Account access is provided on a thirty-day free trial basis by default, however there is free monthly allotment
80 | (25,000 queries), more than enough for experimental purposes.
81 |
82 | ## Documentation and examples
83 |
84 | * "[Exploring the IBM Watson Concept Insights service using watsongraph](http://www.residentmar.io/2016/02/11/watsongraph-visualization.html)"
85 | is a blog post on my personal website which explores the capacities and use cases for the `watsongraph` library. If
86 | you are curious about how it works, the visualizations here are the best place to start!
87 | * The [ConceptModel Jupyter notebook](http://nbviewer.jupyter.org/github/ResidentMario/watsongraph/blob/master/watsongraph%20-%20Concept%20Modeling.ipynb)
88 | provides a detailed walkthrough of basic `ConceptModel` operations. To learn how to use this library, start here,
89 | then move on to the two notebooks below.
90 | * The [Advanced Concept Modeling Jupyter notebook](http://nbviewer.jupyter.org/github/ResidentMario/watsongraph/blob/master/watsongraph%20-%20Advanced%20Concept%20Modeling.ipynb)
91 | provides a detailed walkthrough of advanced `ConceptModel` features as well as recommendations about how to use them
92 | for modeling.
93 | * The [Recommendations Modeling Jupyter notebook](http://nbviewer.jupyter.org/github/ResidentMario/watsongraph/blob/master/watsongraph%20-%20Recommendations.ipynb)
94 | applies `watsongraph` to user recommendation modeling.
95 | * The [Sphinx documentation](http://watsongraph.readthedocs.org/en/latest/) is the reference manual for all
96 | `watsongraph` methods.
97 | * For further inspiration you can also try out IBM's own
98 | [example application](https://concept-insights-demo.mybluemix.net/) (which predates this library).
99 |
100 | ## Contributing
101 |
102 | The `watsongraph` library is currently in its first stable release, so it is still in a fairly early state of
103 | development: there are quite a large number of improvements and new features which could potentially be made. At the
104 | moment I am waiting for work to finish on the [Watson Developer Cloud Python SDK](https://github.com/watson-developer-cloud/python-sdk)
105 | so that I can make a large volume of low-level architectural improvements (and add a few new features) for the next
106 | planned stable release, `0.3.0`. You can see the milestone composite issues in this repository's
107 | [issue tracker](https://github.com/ResidentMario/watsongraph/issues?q=is%3Aopen+is%3Aissue+milestone%3A0.3.0).
108 |
109 | To pull the latest build onto your development machine, [clone](https://help.github.com/articles/cloning-a-repository/) this repository
110 | (`git clone https://github.com/ResidentMario/cultural-insight.git`) and follow the instructions in [setup](#Setup) to
111 | populate your access credentials.
112 |
113 | To submit a minor fix just submit a [pull request](https://help.github.com/articles/using-pull-requests/). Be sure
114 | to explain what problem your change addresses!
115 |
116 | If you are interested in contributing new features or major enhancements, we should talk! You can submit an [issue](https://guides.github.com/features/issues/)
117 | or [pull request](https://help.github.com/articles/using-pull-requests/) summarizing the work using the "Enhancement"
118 | label. You can also [filter](https://github.com/ResidentMario/watsongraph/labels/enhancement)
119 | to enhancements to see what's already on the radar.
120 |
121 | I am very receptive to feedback and would defintely like to see this code reviewed by others, you can reach out to me
122 | at `aleksey@residentmar.io`.
123 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | REM Command file for Sphinx documentation
4 |
5 | if "%SPHINXBUILD%" == "" (
6 | set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=_build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
10 | set I18NSPHINXOPTS=%SPHINXOPTS% .
11 | if NOT "%PAPER%" == "" (
12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
14 | )
15 |
16 | if "%1" == "" goto help
17 |
18 | if "%1" == "help" (
19 | :help
20 | echo.Please use `make ^` where ^ is one of
21 | echo. html to make standalone HTML files
22 | echo. dirhtml to make HTML files named index.html in directories
23 | echo. singlehtml to make a single large HTML file
24 | echo. pickle to make pickle files
25 | echo. json to make JSON files
26 | echo. htmlhelp to make HTML files and a HTML help project
27 | echo. qthelp to make HTML files and a qthelp project
28 | echo. devhelp to make HTML files and a Devhelp project
29 | echo. epub to make an epub
30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
31 | echo. text to make text files
32 | echo. man to make manual pages
33 | echo. texinfo to make Texinfo files
34 | echo. gettext to make PO message catalogs
35 | echo. changes to make an overview over all changed/added/deprecated items
36 | echo. xml to make Docutils-native XML files
37 | echo. pseudoxml to make pseudoxml-XML files for display purposes
38 | echo. linkcheck to check all external links for integrity
39 | echo. doctest to run all doctests embedded in the documentation if enabled
40 | echo. coverage to run coverage check of the documentation if enabled
41 | goto end
42 | )
43 |
44 | if "%1" == "clean" (
45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
46 | del /q /s %BUILDDIR%\*
47 | goto end
48 | )
49 |
50 |
51 | REM Check if sphinx-build is available and fallback to Python version if any
52 | %SPHINXBUILD% 1>NUL 2>NUL
53 | if errorlevel 9009 goto sphinx_python
54 | goto sphinx_ok
55 |
56 | :sphinx_python
57 |
58 | set SPHINXBUILD=python -m sphinx.__init__
59 | %SPHINXBUILD% 2> nul
60 | if errorlevel 9009 (
61 | echo.
62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
63 | echo.installed, then set the SPHINXBUILD environment variable to point
64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
65 | echo.may add the Sphinx directory to PATH.
66 | echo.
67 | echo.If you don't have Sphinx installed, grab it from
68 | echo.http://sphinx-doc.org/
69 | exit /b 1
70 | )
71 |
72 | :sphinx_ok
73 |
74 |
75 | if "%1" == "html" (
76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
77 | if errorlevel 1 exit /b 1
78 | echo.
79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html.
80 | goto end
81 | )
82 |
83 | if "%1" == "dirhtml" (
84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
85 | if errorlevel 1 exit /b 1
86 | echo.
87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
88 | goto end
89 | )
90 |
91 | if "%1" == "singlehtml" (
92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
93 | if errorlevel 1 exit /b 1
94 | echo.
95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
96 | goto end
97 | )
98 |
99 | if "%1" == "pickle" (
100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
101 | if errorlevel 1 exit /b 1
102 | echo.
103 | echo.Build finished; now you can process the pickle files.
104 | goto end
105 | )
106 |
107 | if "%1" == "json" (
108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
109 | if errorlevel 1 exit /b 1
110 | echo.
111 | echo.Build finished; now you can process the JSON files.
112 | goto end
113 | )
114 |
115 | if "%1" == "htmlhelp" (
116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
117 | if errorlevel 1 exit /b 1
118 | echo.
119 | echo.Build finished; now you can run HTML Help Workshop with the ^
120 | .hhp project file in %BUILDDIR%/htmlhelp.
121 | goto end
122 | )
123 |
124 | if "%1" == "qthelp" (
125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
126 | if errorlevel 1 exit /b 1
127 | echo.
128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^
129 | .qhcp project file in %BUILDDIR%/qthelp, like this:
130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\watsongraph.qhcp
131 | echo.To view the help file:
132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\watsongraph.ghc
133 | goto end
134 | )
135 |
136 | if "%1" == "devhelp" (
137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
138 | if errorlevel 1 exit /b 1
139 | echo.
140 | echo.Build finished.
141 | goto end
142 | )
143 |
144 | if "%1" == "epub" (
145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
146 | if errorlevel 1 exit /b 1
147 | echo.
148 | echo.Build finished. The epub file is in %BUILDDIR%/epub.
149 | goto end
150 | )
151 |
152 | if "%1" == "latex" (
153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
154 | if errorlevel 1 exit /b 1
155 | echo.
156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
157 | goto end
158 | )
159 |
160 | if "%1" == "latexpdf" (
161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
162 | cd %BUILDDIR%/latex
163 | make all-pdf
164 | cd %~dp0
165 | echo.
166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex.
167 | goto end
168 | )
169 |
170 | if "%1" == "latexpdfja" (
171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
172 | cd %BUILDDIR%/latex
173 | make all-pdf-ja
174 | cd %~dp0
175 | echo.
176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex.
177 | goto end
178 | )
179 |
180 | if "%1" == "text" (
181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
182 | if errorlevel 1 exit /b 1
183 | echo.
184 | echo.Build finished. The text files are in %BUILDDIR%/text.
185 | goto end
186 | )
187 |
188 | if "%1" == "man" (
189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
190 | if errorlevel 1 exit /b 1
191 | echo.
192 | echo.Build finished. The manual pages are in %BUILDDIR%/man.
193 | goto end
194 | )
195 |
196 | if "%1" == "texinfo" (
197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
198 | if errorlevel 1 exit /b 1
199 | echo.
200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
201 | goto end
202 | )
203 |
204 | if "%1" == "gettext" (
205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
206 | if errorlevel 1 exit /b 1
207 | echo.
208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
209 | goto end
210 | )
211 |
212 | if "%1" == "changes" (
213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
214 | if errorlevel 1 exit /b 1
215 | echo.
216 | echo.The overview file is in %BUILDDIR%/changes.
217 | goto end
218 | )
219 |
220 | if "%1" == "linkcheck" (
221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
222 | if errorlevel 1 exit /b 1
223 | echo.
224 | echo.Link check complete; look for any errors in the above output ^
225 | or in %BUILDDIR%/linkcheck/output.txt.
226 | goto end
227 | )
228 |
229 | if "%1" == "doctest" (
230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
231 | if errorlevel 1 exit /b 1
232 | echo.
233 | echo.Testing of doctests in the sources finished, look at the ^
234 | results in %BUILDDIR%/doctest/output.txt.
235 | goto end
236 | )
237 |
238 | if "%1" == "coverage" (
239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
240 | if errorlevel 1 exit /b 1
241 | echo.
242 | echo.Testing of coverage in the sources finished, look at the ^
243 | results in %BUILDDIR%/coverage/python.txt.
244 | goto end
245 | )
246 |
247 | if "%1" == "xml" (
248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
249 | if errorlevel 1 exit /b 1
250 | echo.
251 | echo.Build finished. The XML files are in %BUILDDIR%/xml.
252 | goto end
253 | )
254 |
255 | if "%1" == "pseudoxml" (
256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
257 | if errorlevel 1 exit /b 1
258 | echo.
259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
260 | goto end
261 | )
262 |
263 | :end
264 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 |
22 | .PHONY: help
23 | help:
24 | @echo "Please use \`make ' where is one of"
25 | @echo " html to make standalone HTML files"
26 | @echo " dirhtml to make HTML files named index.html in directories"
27 | @echo " singlehtml to make a single large HTML file"
28 | @echo " pickle to make pickle files"
29 | @echo " json to make JSON files"
30 | @echo " htmlhelp to make HTML files and a HTML help project"
31 | @echo " qthelp to make HTML files and a qthelp project"
32 | @echo " applehelp to make an Apple Help Book"
33 | @echo " devhelp to make HTML files and a Devhelp project"
34 | @echo " epub to make an epub"
35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
36 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
38 | @echo " text to make text files"
39 | @echo " man to make manual pages"
40 | @echo " texinfo to make Texinfo files"
41 | @echo " info to make Texinfo files and run them through makeinfo"
42 | @echo " gettext to make PO message catalogs"
43 | @echo " changes to make an overview of all changed/added/deprecated items"
44 | @echo " xml to make Docutils-native XML files"
45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
46 | @echo " linkcheck to check all external links for integrity"
47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
48 | @echo " coverage to run coverage check of the documentation (if enabled)"
49 |
50 | .PHONY: clean
51 | clean:
52 | rm -rf $(BUILDDIR)/*
53 |
54 | .PHONY: html
55 | html:
56 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
57 | @echo
58 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
59 |
60 | .PHONY: dirhtml
61 | dirhtml:
62 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
63 | @echo
64 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
65 |
66 | .PHONY: singlehtml
67 | singlehtml:
68 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
69 | @echo
70 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
71 |
72 | .PHONY: pickle
73 | pickle:
74 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
75 | @echo
76 | @echo "Build finished; now you can process the pickle files."
77 |
78 | .PHONY: json
79 | json:
80 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
81 | @echo
82 | @echo "Build finished; now you can process the JSON files."
83 |
84 | .PHONY: htmlhelp
85 | htmlhelp:
86 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
87 | @echo
88 | @echo "Build finished; now you can run HTML Help Workshop with the" \
89 | ".hhp project file in $(BUILDDIR)/htmlhelp."
90 |
91 | .PHONY: qthelp
92 | qthelp:
93 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
94 | @echo
95 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
96 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
97 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/watsongraph.qhcp"
98 | @echo "To view the help file:"
99 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/watsongraph.qhc"
100 |
101 | .PHONY: applehelp
102 | applehelp:
103 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
104 | @echo
105 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
106 | @echo "N.B. You won't be able to view it unless you put it in" \
107 | "~/Library/Documentation/Help or install it in your application" \
108 | "bundle."
109 |
110 | .PHONY: devhelp
111 | devhelp:
112 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
113 | @echo
114 | @echo "Build finished."
115 | @echo "To view the help file:"
116 | @echo "# mkdir -p $$HOME/.local/share/devhelp/watsongraph"
117 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/watsongraph"
118 | @echo "# devhelp"
119 |
120 | .PHONY: epub
121 | epub:
122 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
123 | @echo
124 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
125 |
126 | .PHONY: latex
127 | latex:
128 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
129 | @echo
130 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
131 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
132 | "(use \`make latexpdf' here to do that automatically)."
133 |
134 | .PHONY: latexpdf
135 | latexpdf:
136 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
137 | @echo "Running LaTeX files through pdflatex..."
138 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
139 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
140 |
141 | .PHONY: latexpdfja
142 | latexpdfja:
143 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
144 | @echo "Running LaTeX files through platex and dvipdfmx..."
145 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
146 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
147 |
148 | .PHONY: text
149 | text:
150 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
151 | @echo
152 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
153 |
154 | .PHONY: man
155 | man:
156 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
157 | @echo
158 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
159 |
160 | .PHONY: texinfo
161 | texinfo:
162 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
163 | @echo
164 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
165 | @echo "Run \`make' in that directory to run these through makeinfo" \
166 | "(use \`make info' here to do that automatically)."
167 |
168 | .PHONY: info
169 | info:
170 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
171 | @echo "Running Texinfo files through makeinfo..."
172 | make -C $(BUILDDIR)/texinfo info
173 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
174 |
175 | .PHONY: gettext
176 | gettext:
177 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
178 | @echo
179 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
180 |
181 | .PHONY: changes
182 | changes:
183 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
184 | @echo
185 | @echo "The overview file is in $(BUILDDIR)/changes."
186 |
187 | .PHONY: linkcheck
188 | linkcheck:
189 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
190 | @echo
191 | @echo "Link check complete; look for any errors in the above output " \
192 | "or in $(BUILDDIR)/linkcheck/output.txt."
193 |
194 | .PHONY: doctest
195 | doctest:
196 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
197 | @echo "Testing of doctests in the sources finished, look at the " \
198 | "results in $(BUILDDIR)/doctest/output.txt."
199 |
200 | .PHONY: coverage
201 | coverage:
202 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
203 | @echo "Testing of coverage in the sources finished, look at the " \
204 | "results in $(BUILDDIR)/coverage/python.txt."
205 |
206 | .PHONY: xml
207 | xml:
208 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
209 | @echo
210 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
211 |
212 | .PHONY: pseudoxml
213 | pseudoxml:
214 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
215 | @echo
216 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
217 |
--------------------------------------------------------------------------------
/docs/_build/html/_static/doctools.js:
--------------------------------------------------------------------------------
1 | /*
2 | * doctools.js
3 | * ~~~~~~~~~~~
4 | *
5 | * Sphinx JavaScript utilities for all documentation.
6 | *
7 | * :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | /**
13 | * select a different prefix for underscore
14 | */
15 | $u = _.noConflict();
16 |
17 | /**
18 | * make the code below compatible with browsers without
19 | * an installed firebug like debugger
20 | if (!window.console || !console.firebug) {
21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
23 | "profile", "profileEnd"];
24 | window.console = {};
25 | for (var i = 0; i < names.length; ++i)
26 | window.console[names[i]] = function() {};
27 | }
28 | */
29 |
30 | /**
31 | * small helper function to urldecode strings
32 | */
33 | jQuery.urldecode = function(x) {
34 | return decodeURIComponent(x).replace(/\+/g, ' ');
35 | };
36 |
37 | /**
38 | * small helper function to urlencode strings
39 | */
40 | jQuery.urlencode = encodeURIComponent;
41 |
42 | /**
43 | * This function returns the parsed url parameters of the
44 | * current request. Multiple values per key are supported,
45 | * it will always return arrays of strings for the value parts.
46 | */
47 | jQuery.getQueryParameters = function(s) {
48 | if (typeof s == 'undefined')
49 | s = document.location.search;
50 | var parts = s.substr(s.indexOf('?') + 1).split('&');
51 | var result = {};
52 | for (var i = 0; i < parts.length; i++) {
53 | var tmp = parts[i].split('=', 2);
54 | var key = jQuery.urldecode(tmp[0]);
55 | var value = jQuery.urldecode(tmp[1]);
56 | if (key in result)
57 | result[key].push(value);
58 | else
59 | result[key] = [value];
60 | }
61 | return result;
62 | };
63 |
64 | /**
65 | * highlight a given string on a jquery object by wrapping it in
66 | * span elements with the given class name.
67 | */
68 | jQuery.fn.highlightText = function(text, className) {
69 | function highlight(node) {
70 | if (node.nodeType == 3) {
71 | var val = node.nodeValue;
72 | var pos = val.toLowerCase().indexOf(text);
73 | if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
74 | var span = document.createElement("span");
75 | span.className = className;
76 | span.appendChild(document.createTextNode(val.substr(pos, text.length)));
77 | node.parentNode.insertBefore(span, node.parentNode.insertBefore(
78 | document.createTextNode(val.substr(pos + text.length)),
79 | node.nextSibling));
80 | node.nodeValue = val.substr(0, pos);
81 | }
82 | }
83 | else if (!jQuery(node).is("button, select, textarea")) {
84 | jQuery.each(node.childNodes, function() {
85 | highlight(this);
86 | });
87 | }
88 | }
89 | return this.each(function() {
90 | highlight(this);
91 | });
92 | };
93 |
94 | /*
95 | * backward compatibility for jQuery.browser
96 | * This will be supported until firefox bug is fixed.
97 | */
98 | if (!jQuery.browser) {
99 | jQuery.uaMatch = function(ua) {
100 | ua = ua.toLowerCase();
101 |
102 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
103 | /(webkit)[ \/]([\w.]+)/.exec(ua) ||
104 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
105 | /(msie) ([\w.]+)/.exec(ua) ||
106 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
107 | [];
108 |
109 | return {
110 | browser: match[ 1 ] || "",
111 | version: match[ 2 ] || "0"
112 | };
113 | };
114 | jQuery.browser = {};
115 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
116 | }
117 |
118 | /**
119 | * Small JavaScript module for the documentation.
120 | */
121 | var Documentation = {
122 |
123 | init : function() {
124 | this.fixFirefoxAnchorBug();
125 | this.highlightSearchWords();
126 | this.initIndexTable();
127 | },
128 |
129 | /**
130 | * i18n support
131 | */
132 | TRANSLATIONS : {},
133 | PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
134 | LOCALE : 'unknown',
135 |
136 | // gettext and ngettext don't access this so that the functions
137 | // can safely bound to a different name (_ = Documentation.gettext)
138 | gettext : function(string) {
139 | var translated = Documentation.TRANSLATIONS[string];
140 | if (typeof translated == 'undefined')
141 | return string;
142 | return (typeof translated == 'string') ? translated : translated[0];
143 | },
144 |
145 | ngettext : function(singular, plural, n) {
146 | var translated = Documentation.TRANSLATIONS[singular];
147 | if (typeof translated == 'undefined')
148 | return (n == 1) ? singular : plural;
149 | return translated[Documentation.PLURALEXPR(n)];
150 | },
151 |
152 | addTranslations : function(catalog) {
153 | for (var key in catalog.messages)
154 | this.TRANSLATIONS[key] = catalog.messages[key];
155 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
156 | this.LOCALE = catalog.locale;
157 | },
158 |
159 | /**
160 | * add context elements like header anchor links
161 | */
162 | addContextElements : function() {
163 | $('div[id] > :header:first').each(function() {
164 | $('\u00B6').
165 | attr('href', '#' + this.id).
166 | attr('title', _('Permalink to this headline')).
167 | appendTo(this);
168 | });
169 | $('dt[id]').each(function() {
170 | $('\u00B6').
171 | attr('href', '#' + this.id).
172 | attr('title', _('Permalink to this definition')).
173 | appendTo(this);
174 | });
175 | },
176 |
177 | /**
178 | * workaround a firefox stupidity
179 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
180 | */
181 | fixFirefoxAnchorBug : function() {
182 | if (document.location.hash)
183 | window.setTimeout(function() {
184 | document.location.href += '';
185 | }, 10);
186 | },
187 |
188 | /**
189 | * highlight the search words provided in the url in the text
190 | */
191 | highlightSearchWords : function() {
192 | var params = $.getQueryParameters();
193 | var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
194 | if (terms.length) {
195 | var body = $('div.body');
196 | if (!body.length) {
197 | body = $('body');
198 | }
199 | window.setTimeout(function() {
200 | $.each(terms, function() {
201 | body.highlightText(this.toLowerCase(), 'highlighted');
202 | });
203 | }, 10);
204 | $('
').appendTo(this.out);
304 |
305 | $('#search-progress').text(_('Preparing search...'));
306 | this.startPulse();
307 |
308 | // index already loaded, the browser was quick!
309 | if (this.hasIndex())
310 | this.query(query);
311 | else
312 | this.deferQuery(query);
313 | },
314 |
315 | /**
316 | * execute search (requires search index to be loaded)
317 | */
318 | query : function(query) {
319 | var i;
320 | var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
321 |
322 | // stem the searchterms and add them to the correct list
323 | var stemmer = new Stemmer();
324 | var searchterms = [];
325 | var excluded = [];
326 | var hlterms = [];
327 | var tmp = query.split(/\s+/);
328 | var objectterms = [];
329 | for (i = 0; i < tmp.length; i++) {
330 | if (tmp[i] !== "") {
331 | objectterms.push(tmp[i].toLowerCase());
332 | }
333 |
334 | if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i].match(/^\d+$/) ||
335 | tmp[i] === "") {
336 | // skip this "word"
337 | continue;
338 | }
339 | // stem the word
340 | var word = stemmer.stemWord(tmp[i].toLowerCase());
341 | var toAppend;
342 | // select the correct list
343 | if (word[0] == '-') {
344 | toAppend = excluded;
345 | word = word.substr(1);
346 | }
347 | else {
348 | toAppend = searchterms;
349 | hlterms.push(tmp[i].toLowerCase());
350 | }
351 | // only add if not already in the list
352 | if (!$u.contains(toAppend, word))
353 | toAppend.push(word);
354 | }
355 | var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
356 |
357 | // console.debug('SEARCH: searching for:');
358 | // console.info('required: ', searchterms);
359 | // console.info('excluded: ', excluded);
360 |
361 | // prepare search
362 | var terms = this._index.terms;
363 | var titleterms = this._index.titleterms;
364 |
365 | // array of [filename, title, anchor, descr, score]
366 | var results = [];
367 | $('#search-progress').empty();
368 |
369 | // lookup as object
370 | for (i = 0; i < objectterms.length; i++) {
371 | var others = [].concat(objectterms.slice(0, i),
372 | objectterms.slice(i+1, objectterms.length));
373 | results = results.concat(this.performObjectSearch(objectterms[i], others));
374 | }
375 |
376 | // lookup as search terms in fulltext
377 | results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms));
378 |
379 | // let the scorer override scores with a custom scoring function
380 | if (Scorer.score) {
381 | for (i = 0; i < results.length; i++)
382 | results[i][4] = Scorer.score(results[i]);
383 | }
384 |
385 | // now sort the results by score (in opposite order of appearance, since the
386 | // display function below uses pop() to retrieve items) and then
387 | // alphabetically
388 | results.sort(function(a, b) {
389 | var left = a[4];
390 | var right = b[4];
391 | if (left > right) {
392 | return 1;
393 | } else if (left < right) {
394 | return -1;
395 | } else {
396 | // same score: sort alphabetically
397 | left = a[1].toLowerCase();
398 | right = b[1].toLowerCase();
399 | return (left > right) ? -1 : ((left < right) ? 1 : 0);
400 | }
401 | });
402 |
403 | // for debugging
404 | //Search.lastresults = results.slice(); // a copy
405 | //console.info('search results:', Search.lastresults);
406 |
407 | // print the results
408 | var resultCount = results.length;
409 | function displayNextItem() {
410 | // results left, load the summary and display it
411 | if (results.length) {
412 | var item = results.pop();
413 | var listItem = $('');
414 | if (DOCUMENTATION_OPTIONS.FILE_SUFFIX === '') {
415 | // dirhtml builder
416 | var dirname = item[0] + '/';
417 | if (dirname.match(/\/index\/$/)) {
418 | dirname = dirname.substring(0, dirname.length-6);
419 | } else if (dirname == 'index/') {
420 | dirname = '';
421 | }
422 | listItem.append($('').attr('href',
423 | DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
424 | highlightstring + item[2]).html(item[1]));
425 | } else {
426 | // normal html builders
427 | listItem.append($('').attr('href',
428 | item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
429 | highlightstring + item[2]).html(item[1]));
430 | }
431 | if (item[3]) {
432 | listItem.append($(' (' + item[3] + ')'));
433 | Search.output.append(listItem);
434 | listItem.slideDown(5, function() {
435 | displayNextItem();
436 | });
437 | } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
438 | $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[0] + '.txt',
439 | dataType: "text",
440 | complete: function(jqxhr, textstatus) {
441 | var data = jqxhr.responseText;
442 | if (data !== '' && data !== undefined) {
443 | listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
444 | }
445 | Search.output.append(listItem);
446 | listItem.slideDown(5, function() {
447 | displayNextItem();
448 | });
449 | }});
450 | } else {
451 | // no source available, just display title
452 | Search.output.append(listItem);
453 | listItem.slideDown(5, function() {
454 | displayNextItem();
455 | });
456 | }
457 | }
458 | // search finished, update title and status message
459 | else {
460 | Search.stopPulse();
461 | Search.title.text(_('Search Results'));
462 | if (!resultCount)
463 | Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
464 | else
465 | Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
466 | Search.status.fadeIn(500);
467 | }
468 | }
469 | displayNextItem();
470 | },
471 |
472 | /**
473 | * search for object names
474 | */
475 | performObjectSearch : function(object, otherterms) {
476 | var filenames = this._index.filenames;
477 | var objects = this._index.objects;
478 | var objnames = this._index.objnames;
479 | var titles = this._index.titles;
480 |
481 | var i;
482 | var results = [];
483 |
484 | for (var prefix in objects) {
485 | for (var name in objects[prefix]) {
486 | var fullname = (prefix ? prefix + '.' : '') + name;
487 | if (fullname.toLowerCase().indexOf(object) > -1) {
488 | var score = 0;
489 | var parts = fullname.split('.');
490 | // check for different match types: exact matches of full name or
491 | // "last name" (i.e. last dotted part)
492 | if (fullname == object || parts[parts.length - 1] == object) {
493 | score += Scorer.objNameMatch;
494 | // matches in last name
495 | } else if (parts[parts.length - 1].indexOf(object) > -1) {
496 | score += Scorer.objPartialMatch;
497 | }
498 | var match = objects[prefix][name];
499 | var objname = objnames[match[1]][2];
500 | var title = titles[match[0]];
501 | // If more than one term searched for, we require other words to be
502 | // found in the name/title/description
503 | if (otherterms.length > 0) {
504 | var haystack = (prefix + ' ' + name + ' ' +
505 | objname + ' ' + title).toLowerCase();
506 | var allfound = true;
507 | for (i = 0; i < otherterms.length; i++) {
508 | if (haystack.indexOf(otherterms[i]) == -1) {
509 | allfound = false;
510 | break;
511 | }
512 | }
513 | if (!allfound) {
514 | continue;
515 | }
516 | }
517 | var descr = objname + _(', in ') + title;
518 |
519 | var anchor = match[3];
520 | if (anchor === '')
521 | anchor = fullname;
522 | else if (anchor == '-')
523 | anchor = objnames[match[1]][1] + '-' + fullname;
524 | // add custom score for some objects according to scorer
525 | if (Scorer.objPrio.hasOwnProperty(match[2])) {
526 | score += Scorer.objPrio[match[2]];
527 | } else {
528 | score += Scorer.objPrioDefault;
529 | }
530 | results.push([filenames[match[0]], fullname, '#'+anchor, descr, score]);
531 | }
532 | }
533 | }
534 |
535 | return results;
536 | },
537 |
538 | /**
539 | * search for full-text terms in the index
540 | */
541 | performTermsSearch : function(searchterms, excluded, terms, titleterms) {
542 | var filenames = this._index.filenames;
543 | var titles = this._index.titles;
544 |
545 | var i, j, file;
546 | var fileMap = {};
547 | var scoreMap = {};
548 | var results = [];
549 |
550 | // perform the search on the required terms
551 | for (i = 0; i < searchterms.length; i++) {
552 | var word = searchterms[i];
553 | var files = [];
554 | var _o = [
555 | {files: terms[word], score: Scorer.term},
556 | {files: titleterms[word], score: Scorer.title}
557 | ];
558 |
559 | // no match but word was a required one
560 | if ($u.every(_o, function(o){return o.files === undefined;})) {
561 | break;
562 | }
563 | // found search word in contents
564 | $u.each(_o, function(o) {
565 | var _files = o.files;
566 | if (_files === undefined)
567 | return
568 |
569 | if (_files.length === undefined)
570 | _files = [_files];
571 | files = files.concat(_files);
572 |
573 | // set score for the word in each file to Scorer.term
574 | for (j = 0; j < _files.length; j++) {
575 | file = _files[j];
576 | if (!(file in scoreMap))
577 | scoreMap[file] = {}
578 | scoreMap[file][word] = o.score;
579 | }
580 | });
581 |
582 | // create the mapping
583 | for (j = 0; j < files.length; j++) {
584 | file = files[j];
585 | if (file in fileMap)
586 | fileMap[file].push(word);
587 | else
588 | fileMap[file] = [word];
589 | }
590 | }
591 |
592 | // now check if the files don't contain excluded terms
593 | for (file in fileMap) {
594 | var valid = true;
595 |
596 | // check if all requirements are matched
597 | if (fileMap[file].length != searchterms.length)
598 | continue;
599 |
600 | // ensure that none of the excluded terms is in the search result
601 | for (i = 0; i < excluded.length; i++) {
602 | if (terms[excluded[i]] == file ||
603 | titleterms[excluded[i]] == file ||
604 | $u.contains(terms[excluded[i]] || [], file) ||
605 | $u.contains(titleterms[excluded[i]] || [], file)) {
606 | valid = false;
607 | break;
608 | }
609 | }
610 |
611 | // if we have still a valid result we can add it to the result list
612 | if (valid) {
613 | // select one (max) score for the file.
614 | // for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
615 | var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
616 | results.push([filenames[file], titles[file], '', null, score]);
617 | }
618 | }
619 | return results;
620 | },
621 |
622 | /**
623 | * helper function to return a node containing the
624 | * search summary for a given text. keywords is a list
625 | * of stemmed words, hlwords is the list of normal, unstemmed
626 | * words. the first one is used to find the occurance, the
627 | * latter for highlighting it.
628 | */
629 | makeSearchSummary : function(text, keywords, hlwords) {
630 | var textLower = text.toLowerCase();
631 | var start = 0;
632 | $.each(keywords, function() {
633 | var i = textLower.indexOf(this.toLowerCase());
634 | if (i > -1)
635 | start = i;
636 | });
637 | start = Math.max(start - 120, 0);
638 | var excerpt = ((start > 0) ? '...' : '') +
639 | $.trim(text.substr(start, 240)) +
640 | ((start + 240 - text.length) ? '...' : '');
641 | var rv = $('').text(excerpt);
642 | $.each(hlwords, function() {
643 | rv = rv.highlightText(this, 'highlighted');
644 | });
645 | return rv;
646 | }
647 | };
648 |
649 | $(document).ready(function() {
650 | Search.init();
651 | });
--------------------------------------------------------------------------------
/watsongraph/conceptmodel.py:
--------------------------------------------------------------------------------
1 | from watsongraph.node import Node
2 | import networkx as nx
3 | import watsongraph.event_insight_lib
4 | from networkx.readwrite import json_graph
5 | from mwviews.api import PageviewsClient
6 |
7 |
8 | # import graphistry
9 |
10 |
11 | # TODO: Graphistry-based visualize() method.
12 | # Come back to this task in a while, they're working through Unicode errors at the moment, nothing to be done just yet.
13 |
14 |
15 | class ConceptModel:
16 | """
17 | The `ConceptModel` object is at the core of what this library does.
18 |
19 | Each **concept** in the `ConceptModel` is mapped to a corresponding unique Wikipedia page. These concepts are
20 | connected to one another in turn by **relevance edges** of a 0-to-1 scaled strength. This `ConceptModel` can then
21 | be associated with any number of applications. Basic bindings are provided, in particular, for a recommendation
22 | service using library-provided `Item` and `User` classes.
23 | """
24 |
25 | """
26 | The model itself is stored in the form of a `networkx.Graph` directed graph.
27 | """
28 | graph = None
29 |
30 | def __init__(self, list_of_concepts=None):
31 | """
32 | Initializes a `ConceptModel` around a list of concepts.
33 |
34 | :param list_of_concepts: A list of concept labels (eg. ['Microsoft', 'IBM'] or ['Apple Inc.']) to initialize the
35 | model around.
36 | """
37 | # Initialize the model graph object.
38 | self.graph = nx.Graph()
39 | # Enter and associate the starting nodes.
40 | # TODO: Assert that a list is passed, otherwise it will decompile the string into letters. Common error!
41 | if list_of_concepts:
42 | for concept_label in list_of_concepts:
43 | mixin_concept = Node(concept_label)
44 | self.graph.add_node(mixin_concept)
45 |
46 | ###################################
47 | # Setters, getters, and printers. #
48 | ###################################
49 |
50 | def nodes(self):
51 | """
52 | :return: Returns a list of all of the `Node` objects in the `ConceptModel`.
53 | """
54 | return self.graph.nodes()
55 |
56 | def concepts(self):
57 | """
58 |
59 | :return: Returns a sorted list of all concepts in the `ConceptModel`.
60 | """
61 | return sorted([concept.concept for concept in self.nodes()])
62 |
63 | def edges(self):
64 | """
65 | :return: Returns a list of all `(concept, other concept, strength)` tuples in the `ConceptModel`.
66 | """
67 | return sorted([("{0:.3f}".format(self.graph[edge[0]][edge[1]]['weight']), edge[0].concept, edge[1].concept)
68 | for edge in self.graph.edges()], reverse=True)
69 |
70 | def get_node(self, concept):
71 | """
72 | Returns the `Node` object associated with a concept in the `ConceptModel`.
73 |
74 | :param concept: The concept of a Concept supposedly in the ConceptModel.
75 | :return: The `Node` object in the `ConceptModel`, if it is found. Throws an error if it is not.
76 | """
77 | for node in self.nodes():
78 | if node.concept == concept:
79 | return node
80 | raise RuntimeError('Concept ' + concept + ' not found in ' + str(self))
81 |
82 | def remove(self, concept):
83 | """
84 | Removes the given concept from the `ConceptModel`.
85 |
86 | :param concept: The concept being removed from the model.
87 | """
88 | self.graph.remove_node(self.get_node(concept))
89 |
90 | def neighborhood(self, concept):
91 | """
92 | :param concept: The concept that is the focus of this operation.
93 | :return: Returns the "neighborhood" of a concept: a list of `(correlation, concept)` tuples pointing to/from
94 | it, plus itself. The neighborhood of the concept, a list of `(concept, concept, relevance_edge)` tuples
95 | much like the ones returned by `edges()` that contains every edge drawn from the chosen concept to any
96 | other in the graph.
97 |
98 | Note that graph theoretic convention does not consider a node to be a neighbor to itself. Thus the
99 | relevance tuple `(1, same_concept, same_concept)` is not included in output.
100 | """
101 | return sorted([(self.graph[self.get_node(concept)][node]['weight'], node.concept) for node in
102 | self.graph.neighbors(self.get_node(concept))], reverse=True)
103 |
104 | ######################
105 | # Parameter methods. #
106 | ######################
107 |
108 | def concepts_by_property(self, prop):
109 | """
110 | :param prop: The `property` to sort the returned output by.
111 | :return: Returns a list of `(prop, concept)` tuples sorted by prop. Note that this method will fail if this
112 | property is not initialized for all concepts.
113 | """
114 | return sorted([(node.get_property(prop), node.concept) for node in self.nodes()], reverse=True)
115 |
116 | def concepts_by_view_count(self):
117 | """
118 | Wrapper for `concepts_by_property()` for the `view_count` case.
119 |
120 | :return: Returns a list of `(view_count, concept)` tuples sorted by `view_count`.
121 | """
122 | return self.concepts_by_property('view_count')
123 |
124 | def set_view_counts(self):
125 | """
126 | Initializes the `view_count` property for all of the concepts in the `ConceptModel`.
127 | """
128 | for node in self.nodes():
129 | p = PageviewsClient().article_views("en.wikipedia", [node.concept.replace(' ', '_')])
130 | p = [p[key][node.concept.replace(' ', '_')] for key in p.keys()]
131 | p = int(sum([daily_view_count for daily_view_count in p if daily_view_count]) / len(p))
132 | node.set_property('view_count', p)
133 |
134 | def get_view_count(self, concept):
135 | """
136 | Returns the `view_count` of a concept in the `ConceptModel`.
137 |
138 | :param concept: The concept supposedly in the `ConceptModel`.
139 | :return: The `view_count` int parameter of the concept, if it is found. Throws an error if it is not.
140 | """
141 | return self.get_node(concept).get_property('view_count')
142 |
143 | def set_property(self, concept, param, value):
144 | """
145 | Sets the `param` property of `concept` to `value`.
146 |
147 | :param concept: Concept being given a parameter.
148 | :param param: The parameter being given.
149 | :param value: The value the parameter being given takes on.
150 | """
151 | self.get_node(concept).set_property(param, value)
152 |
153 | def map_property(self, prop, func):
154 | """
155 | Maps the `param` property of all of the concepts in the ConceptModel object by way of the user-provided `func`.
156 |
157 | :param prop: The parameter being given.
158 | :param func: The function that is called on the concept in order to determine the value of `prop`.
159 | """
160 | for node in self.nodes():
161 | node.set_property(prop, func(node.concept))
162 |
163 | ##################
164 | # Graph methods. #
165 | ##################
166 |
167 | def add(self, concept):
168 | """
169 | Simple adder method.
170 |
171 | :param concept: Concept to be added to the model.
172 | """
173 | self.graph = nx.compose(self.graph, ConceptModel([concept]).graph)
174 |
175 | def merge_with(self, mixin_concept_model):
176 | """
177 | Merges the given graph into the current one. The nx.compose method used here compares the hashes of the
178 | Concept objects being merged, and hashes are overwritten to map to labels. e.g. `A = Concept('IBM')` and `B =
179 | Concept('IBM')` have the same `__hash__()`, even though the objects are different, so they will merge into
180 | one when composed.
181 |
182 | :param mixin_concept_model: The `ConceptModel` object that is being folded into the current object.
183 | """
184 | self.graph = nx.compose(self.graph, mixin_concept_model.graph)
185 |
186 | def copy(self):
187 | """
188 | Returns a deep copy of itself. Used by `User.express_interest()` to merge `Item` and `User` concept models
189 | without distorting the `Item` model.
190 |
191 | :return: A deep copy of the current `ConceptModel`.
192 | """
193 | ret = ConceptModel()
194 | ret.graph = self.graph.copy()
195 | return ret
196 |
197 | def augment_by_node(self, node, level=0, limit=50):
198 | """
199 | Augments the ConceptModel by mining the given node and adding newly discovered nodes to the resultant graph.
200 |
201 | :param node: The node to be expanded. Note that this node need not already be present in the graph.
202 | :param level: The limit placed on the depth of the graph. A limit of 0 is highest, corresponding with the
203 | most popular articles; a limit of 5 is the broadest and graphs to the widest cachet of articles. This
204 | parameter is a parameter that is passed directly to the IBM Watson API call.
205 | :param limit: a cutoff placed on the number of related concepts to be returned. This parameter is passed
206 | directly to the IBM Watson API call.
207 | """
208 | mixin = ConceptModel()
209 | related_concepts_raw = watsongraph.event_insight_lib.get_related_concepts(node.concept, level=level,
210 | limit=limit)
211 | if node not in self.nodes():
212 | self.graph.add_node(node)
213 | for raw_concept in related_concepts_raw['concepts']:
214 | # Avoid adding the `A-A` multi-edge returned by the raw `get_related_concepts`.
215 | if raw_concept['concept']['label'] != node.concept:
216 | new_node = Node(raw_concept['concept']['label'])
217 | mixin.graph.add_edge(self.get_node(node.concept), new_node, weight=raw_concept['score'])
218 | self.merge_with(mixin)
219 |
220 | def augment(self, concept, level=0, limit=50):
221 | """
222 | Augments the ConceptModel by assigning the given node to a concept and adding newly discovered nodes to the
223 | resultant graph. This method is an externally-facing wrapper for the internal `augment_by_node()` method:
224 | the difference is that this method maps a concept while that method maps a node.
225 |
226 | :param concept: The concept to be expanded. Note that this concept need not already be present in the graph.
227 |
228 | :param level: The limit placed on the depth of the graph. A limit of 0 is the highest, corresponding with
229 | the most popular articles; a limit of 5 is the broadest.
230 |
231 | :param limit: a cutoff placed on the number of related concepts to be returned. This parameter is passed
232 | directly to the IBM Watson API call.
233 | """
234 | self.augment_by_node(Node(concept), level=level, limit=limit)
235 |
236 | def abridge_by_node(self, node, level=0, limit=50):
237 | """
238 | Performs the inverse operation of augment by removing the expansion of the given node from the graph.
239 |
240 | :param node: The node to be abridged. Note that this node need not already be present in the graph.
241 | :param level: The limit placed on the depth of the graph. A limit of 0 is highest, corresponding with the
242 | most popular articles; a limit of 5 is the broadest and graphs to the widest cachet of articles. This
243 | parameter is a parameter that is passed directly to the IBM Watson API call.
244 |
245 | :param limit: a cutoff placed on the number of related concepts to be returned. This parameter is passed
246 | directly to the IBM Watson API call.
247 | """
248 | inverse = ConceptModel()
249 | inverse.augment_by_node(node, level=level, limit=limit)
250 | for concept_node in [node for node in self.nodes() if node in inverse.graph.nodes()]:
251 | self.graph.remove_node(concept_node)
252 |
253 | def abridge(self, concept, level=0, limit=50):
254 | """
255 | Performs the inverse operation of augment by removing the expansion of the given concept from the graph.
256 | This method is an externally-facing wrapper for the internal `abridge_by_node()` method: the difference is
257 | that this method maps a concept while that method maps a node.
258 |
259 | :param concept: The concept to be expanded. Note that this concept need not already be present in the graph.
260 |
261 | :param level: The limit placed on the depth of the graph. A limit of 0 is highest, corresponding with the most
262 | popular articles; a limit of 5 is the broadest and graphs to the widest cachet of articles. This
263 | parameter is a parameter that is passed directly to the IBM Watson API call.
264 |
265 | :param limit: a cutoff placed on the number of related concepts to be returned. This parameter is passed
266 | directly to the IBM Watson API call.
267 | """
268 | self.abridge_by_node(Node(concept), level=level, limit=limit)
269 |
270 | def explode(self, level=0, limit=50):
271 | """
272 | Explodes a graph by augmenting every concept already in it. Warning: for sufficiently large graphs this is a
273 | very slow operation! See also the expand() method for a more focused version of this operation.
274 |
275 | :param level: The limit placed on the depth of the graph. A limit of 0 is highest, corresponding with the
276 | most popular articles; a limit of 5 is the broadest and graphs to the widest cachet of articles. This
277 | parameter is a parameter that is passed directly to the IBM Watson API call.
278 |
279 | :param limit: a cutoff placed on the number of related concepts to be returned. This parameter is passed
280 | directly to the IBM Watson API call.
281 | """
282 | for concept_node in self.nodes():
283 | self.augment_by_node(concept_node, level=level, limit=limit)
284 |
285 | def expand(self, level=0, limit=50, n=1):
286 | """
287 | Expands a graph by augmenting concepts with only one (or no) edge. Warning: for sufficiently large graphs this
288 | is a slow operation! See also the expand() method for a less focused version of this operation.
289 |
290 | :param level: The limit placed on the depth of the graph. A limit of 0 is highest, corresponding with the
291 | most popular articles; a limit of 5 is the broadest and graphs to the widest cachet of articles. This
292 | parameter is a parameter that is passed directly to the IBM Watson API call.
293 |
294 | :param limit: a cutoff placed on the number of related concepts to be returned. This parameter is passed
295 | directly to the IBM Watson API call.
296 |
297 | :param n: The cutoff for the number of neighbors a node can have.
298 | """
299 | for concept_node in [node for node in self.nodes() if len(self.graph.neighbors(node)) <= n]:
300 | self.augment_by_node(concept_node, level=level, limit=limit)
301 |
302 | def intersection_with_by_nodes(self, mixin_concept_model):
303 | """
304 | :param mixin_concept_model: Another ConceptModel object to be compared to.
305 |
306 | :return: A list of overlapping concept nodes with their relevance parameters set to average relevance.
307 | """
308 | overlapping_concept_nodes = [node for node in self.nodes() if node in mixin_concept_model.nodes()]
309 | for concept_node in overlapping_concept_nodes:
310 | if 'relevance' in concept_node.properties.keys():
311 | concept_node.set_relevance((concept_node.properties['relevance'] + mixin_concept_model.get_node(
312 | concept_node.concept).properties['relevance']) / 2)
313 | return overlapping_concept_nodes
314 |
315 | def add_edges(self, source_concept, list_of_target_concepts, prune=False):
316 | """
317 | Given a source concept and a list of target concepts, creates relevance edges between the source and the
318 | targets and adds them to the graph.
319 |
320 | :param source_concept: The source concept edges are being added from.
321 |
322 | :param list_of_target_concepts: The target concepts edges are being added to.
323 |
324 | :param prune: Watson returns correlations for edges which it does not know enough about as 0.5,
325 | a lack of extensibility which can cause sizable issues: for example you might have both `(0.5, IBM,
326 | Apple Inc.)` and `(0.5, IBM, Apple)`. We as humans know that these are totally not equal comparisons,
327 | but the system does not! When this parameter is set to True (it is set to False by default) only edges
328 | with a correlation higher than 0.5 are added.
329 | """
330 | raw_scores = watsongraph.event_insight_lib.get_relation_scores(source_concept, list_of_target_concepts)
331 | mixin_graph = nx.Graph()
332 | mixin_source_node = Node(source_concept)
333 | own_concepts = self.concepts()
334 | for raw_concept in raw_scores['scores']:
335 | # Check that we pass relevance.
336 | if not prune or (prune and raw_concept['score'] > 0.5):
337 | # Parse the returned nodal concept text into the concept: ".../Watson_(computer)"->"Watson (computer)".
338 | raw_concept['concept'] = raw_concept['concept'].replace('_', ' ')
339 | mixin_concept = raw_concept['concept'][raw_concept['concept'].rfind('/') + 1:]
340 | # We want to keep our graphs simple, so explicitly avoid concept-to-concept loops. Why is the user
341 | # asking for something like that anyway?
342 | if mixin_concept != source_concept:
343 | mixin_target_node = Node(mixin_concept)
344 | # We might want to be examining the relationship between two nodes that are already in the model.
345 | # For example, we might call `ibm = ConceptModel(['IBM', 'Watson (computer)]); ibm.explode_edges()`.
346 | # In this case `nx.compose` will override the existing Node with our new Node. But what if our
347 | # old Node had properties assigned to it? Then these properties are deleted!
348 | # To account for this subtlety we check to see if mixin_concept is already in the model, and,
349 | # if it is, we explicitly attach its properties to the new Node it will be overwritten by.
350 | if mixin_concept in own_concepts:
351 | mixin_target_node.properties = self.get_node(mixin_concept).properties
352 | # Note that this is the `nx.add_edge()` method, not the `conceptmodel.add_edge()` one.
353 | mixin_graph.add_edge(mixin_source_node, mixin_target_node, weight=raw_concept['score'])
354 | self.graph = nx.compose(self.graph, mixin_graph)
355 |
356 | def add_edge(self, source_concept, target_concept, prune=False):
357 | """
358 | Wrapper for `add_edges()` for the single-concept case, so that you don't have to call a list explicitly.
359 |
360 | :param source_concept: The source concept edges are being added from.
361 |
362 | :param target_concept: The target concept an edge is being added to.
363 |
364 | :param prune: Watson returns correlations for edges which it does not know enough about as 0.5,
365 | a lack of extensibility which can cause sizable issues: for example you might have both `(0.5, IBM,
366 | Apple Inc.)` and `(0.5, IBM, Apple)`. We as humans know that these are totally not equal comparisons,
367 | but the system does not! When this parameter is set to `True` (it is set to `False` by default) only edges
368 | with a correlation higher than 0.5 are added.
369 | """
370 | self.add_edges(source_concept, [target_concept], prune=prune)
371 |
372 | def explode_edges(self, prune=False):
373 | """
374 | Calls `add_edges()` on everything in the model, all at once. Like `explode()` but for concept edges!
375 |
376 | :param prune: Watson returns correlations for edges which it does not know enough about as 0.5,
377 | a lack of extensibility which can cause sizable issues: for example you might have both `(0.5, IBM,
378 | Apple Inc.)` and `(0.5, IBM, Apple)`. We as humans know that these are totally not equal comparisons,
379 | but the system does not! When this parameter is set to True (it is set to False by default) only edges with
380 | a correlation higher than 0.5 are added.
381 | """
382 | c_list = self.concepts()
383 | for concept in self.concepts():
384 | c_list.remove(concept)
385 | if c_list:
386 | self.add_edges(concept, c_list, prune=prune)
387 |
388 | ###############
389 | # IO methods. #
390 | ###############
391 |
392 | def to_json(self):
393 | """
394 | Returns the JSON representation of a ConceptModel. Counter-operation to `load_from_dict()`.
395 |
396 | :param self: A ConceptModel.
397 |
398 | :return: The nx dictionary representation of the ConceptModel.
399 | """
400 | flattened_model = nx.relabel_nodes(self.graph, {node: node.concept for node in self.nodes()})
401 | data_repr = json_graph.node_link_data(flattened_model)
402 | for node in data_repr['nodes']:
403 | for prop in self.get_node(node['id']).properties.keys():
404 | node[prop] = self.get_node(node['id']).properties[prop]
405 | return data_repr
406 |
407 | def load_from_json(self, data_repr):
408 | """
409 | Generates a ConceptModel out of a JSON representation. Counter-operation to `to_dict()`.
410 |
411 | :param data_repr: The dictionary being passed to the method.
412 |
413 | :return: The generated ConceptModel.
414 | """
415 | flattened_graph = json_graph.node_link_graph(data_repr)
416 | m = {concept: Node(concept) for concept in flattened_graph.nodes()}
417 | self.graph = nx.relabel_nodes(flattened_graph, m)
418 | for node in data_repr['nodes']:
419 | for key in [key for key in node.keys() if key != 'id']:
420 | self.set_property(node['id'], key, node[key])
421 |
422 | # def visualize(self, filename='graphistry_credentials.json'):
423 | # """
424 | # Generates a ConceptModel visualization. WIP. Need to get a graphistry key first...
425 | # :param filename -- The filename at which Graphistry service credentials are stored. Defaults to
426 | # `graphistry_credentials.json`.
427 | # :return: The generated visualization.
428 | # """
429 | # graphistry_token = import_graphistry_credentials(filename=filename)
430 | # graphistry.register(key=graphistry_token)
431 | # flattened_model = nx.relabel_nodes(self.graph, {node: node.concept for node in self.nodes()})
432 | # flattened_model_dataframe = nx.convert_matrix.to_pandas_dataframe(flattened_model)
433 | # for key in flattened_model_dataframe.keys():
434 | # flattened_model_dataframe[key] = flattened_model_dataframe[key].astype(str)
435 | # g = graphistry.bind(source='source', destination='target')
436 | # g.plot(flattened_model_dataframe)
437 |
438 |
439 | # def import_graphistry_credentials(filename='graphistry_credentials.json'):
440 | # """
441 | # Internal method which finds the credentials file describing the token that's needed to access Graphistry
442 | # services. Graphistry is an alpha-level in-development backend that is used here for visualizing the
443 | # ConceptModel, so keys are given out on a per-user basis; see https://github.com/graphistry/pygraphistry for more
444 | # information.
445 | #
446 | # See also `watsongraph.event_insight_lib.import_credentials()`, which replicates this operation for the (
447 | # required) Concept Insights API service key.
448 | #
449 | # :param filename -- The filename at which Graphistry service credentials are stored. Defaults to
450 | # `graphistry_credentials.json`.
451 | # """
452 | # if filename in [f for f in os.listdir('.') if os.path.isfile(f)]:
453 | # return json.load(open(filename))['credentials']['key']
454 | # else:
455 | # raise IOError(
456 | # 'The visualization methods that come with the watsongraph library require a Graphistry credentials '
457 | # 'token to work. Did you forget to define one? For more information refer '
458 | # 'to:\n\nhttps://github.com/graphistry/pygraphistry#api-key')
459 |
460 |
461 | def model(user_input):
462 | """
463 | Models arbitrary user input and returns an associated ConceptModel. See also the similar `concept.conceptualize`
464 | static method, which binds arbitrary input to a single concept label instead.
465 |
466 | :param user_input: Arbitrary input, be it a name (e.g. Apple (company) -> Apple Inc.) or a text string (e.g.
467 | "the iPhone 5C, released this Thursday..." -> iPhone).
468 | :return: The constructed `ConceptModel` object. Might be empty!
469 | """
470 | new_model = ConceptModel()
471 | if user_input:
472 | related_concepts_raw = watsongraph.event_insight_lib.annotate_text(user_input)
473 | new_data = [(raw_concept['concept']['label'], raw_concept['score']) for raw_concept in
474 | related_concepts_raw['annotations']]
475 | for data in new_data:
476 | new_model.graph.add_node(Node(data[0], relevance=data[1]))
477 | return new_model
478 |
--------------------------------------------------------------------------------
/docs/_build/html/_static/websupport.js:
--------------------------------------------------------------------------------
1 | /*
2 | * websupport.js
3 | * ~~~~~~~~~~~~~
4 | *
5 | * sphinx.websupport utilties for all documentation.
6 | *
7 | * :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | (function($) {
13 | $.fn.autogrow = function() {
14 | return this.each(function() {
15 | var textarea = this;
16 |
17 | $.fn.autogrow.resize(textarea);
18 |
19 | $(textarea)
20 | .focus(function() {
21 | textarea.interval = setInterval(function() {
22 | $.fn.autogrow.resize(textarea);
23 | }, 500);
24 | })
25 | .blur(function() {
26 | clearInterval(textarea.interval);
27 | });
28 | });
29 | };
30 |
31 | $.fn.autogrow.resize = function(textarea) {
32 | var lineHeight = parseInt($(textarea).css('line-height'), 10);
33 | var lines = textarea.value.split('\n');
34 | var columns = textarea.cols;
35 | var lineCount = 0;
36 | $.each(lines, function() {
37 | lineCount += Math.ceil(this.length / columns) || 1;
38 | });
39 | var height = lineHeight * (lineCount + 1);
40 | $(textarea).css('height', height);
41 | };
42 | })(jQuery);
43 |
44 | (function($) {
45 | var comp, by;
46 |
47 | function init() {
48 | initEvents();
49 | initComparator();
50 | }
51 |
52 | function initEvents() {
53 | $(document).on("click", 'a.comment-close', function(event) {
54 | event.preventDefault();
55 | hide($(this).attr('id').substring(2));
56 | });
57 | $(document).on("click", 'a.vote', function(event) {
58 | event.preventDefault();
59 | handleVote($(this));
60 | });
61 | $(document).on("click", 'a.reply', function(event) {
62 | event.preventDefault();
63 | openReply($(this).attr('id').substring(2));
64 | });
65 | $(document).on("click", 'a.close-reply', function(event) {
66 | event.preventDefault();
67 | closeReply($(this).attr('id').substring(2));
68 | });
69 | $(document).on("click", 'a.sort-option', function(event) {
70 | event.preventDefault();
71 | handleReSort($(this));
72 | });
73 | $(document).on("click", 'a.show-proposal', function(event) {
74 | event.preventDefault();
75 | showProposal($(this).attr('id').substring(2));
76 | });
77 | $(document).on("click", 'a.hide-proposal', function(event) {
78 | event.preventDefault();
79 | hideProposal($(this).attr('id').substring(2));
80 | });
81 | $(document).on("click", 'a.show-propose-change', function(event) {
82 | event.preventDefault();
83 | showProposeChange($(this).attr('id').substring(2));
84 | });
85 | $(document).on("click", 'a.hide-propose-change', function(event) {
86 | event.preventDefault();
87 | hideProposeChange($(this).attr('id').substring(2));
88 | });
89 | $(document).on("click", 'a.accept-comment', function(event) {
90 | event.preventDefault();
91 | acceptComment($(this).attr('id').substring(2));
92 | });
93 | $(document).on("click", 'a.delete-comment', function(event) {
94 | event.preventDefault();
95 | deleteComment($(this).attr('id').substring(2));
96 | });
97 | $(document).on("click", 'a.comment-markup', function(event) {
98 | event.preventDefault();
99 | toggleCommentMarkupBox($(this).attr('id').substring(2));
100 | });
101 | }
102 |
103 | /**
104 | * Set comp, which is a comparator function used for sorting and
105 | * inserting comments into the list.
106 | */
107 | function setComparator() {
108 | // If the first three letters are "asc", sort in ascending order
109 | // and remove the prefix.
110 | if (by.substring(0,3) == 'asc') {
111 | var i = by.substring(3);
112 | comp = function(a, b) { return a[i] - b[i]; };
113 | } else {
114 | // Otherwise sort in descending order.
115 | comp = function(a, b) { return b[by] - a[by]; };
116 | }
117 |
118 | // Reset link styles and format the selected sort option.
119 | $('a.sel').attr('href', '#').removeClass('sel');
120 | $('a.by' + by).removeAttr('href').addClass('sel');
121 | }
122 |
123 | /**
124 | * Create a comp function. If the user has preferences stored in
125 | * the sortBy cookie, use those, otherwise use the default.
126 | */
127 | function initComparator() {
128 | by = 'rating'; // Default to sort by rating.
129 | // If the sortBy cookie is set, use that instead.
130 | if (document.cookie.length > 0) {
131 | var start = document.cookie.indexOf('sortBy=');
132 | if (start != -1) {
133 | start = start + 7;
134 | var end = document.cookie.indexOf(";", start);
135 | if (end == -1) {
136 | end = document.cookie.length;
137 | by = unescape(document.cookie.substring(start, end));
138 | }
139 | }
140 | }
141 | setComparator();
142 | }
143 |
144 | /**
145 | * Show a comment div.
146 | */
147 | function show(id) {
148 | $('#ao' + id).hide();
149 | $('#ah' + id).show();
150 | var context = $.extend({id: id}, opts);
151 | var popup = $(renderTemplate(popupTemplate, context)).hide();
152 | popup.find('textarea[name="proposal"]').hide();
153 | popup.find('a.by' + by).addClass('sel');
154 | var form = popup.find('#cf' + id);
155 | form.submit(function(event) {
156 | event.preventDefault();
157 | addComment(form);
158 | });
159 | $('#s' + id).after(popup);
160 | popup.slideDown('fast', function() {
161 | getComments(id);
162 | });
163 | }
164 |
165 | /**
166 | * Hide a comment div.
167 | */
168 | function hide(id) {
169 | $('#ah' + id).hide();
170 | $('#ao' + id).show();
171 | var div = $('#sc' + id);
172 | div.slideUp('fast', function() {
173 | div.remove();
174 | });
175 | }
176 |
177 | /**
178 | * Perform an ajax request to get comments for a node
179 | * and insert the comments into the comments tree.
180 | */
181 | function getComments(id) {
182 | $.ajax({
183 | type: 'GET',
184 | url: opts.getCommentsURL,
185 | data: {node: id},
186 | success: function(data, textStatus, request) {
187 | var ul = $('#cl' + id);
188 | var speed = 100;
189 | $('#cf' + id)
190 | .find('textarea[name="proposal"]')
191 | .data('source', data.source);
192 |
193 | if (data.comments.length === 0) {
194 | ul.html('
No comments yet.
');
195 | ul.data('empty', true);
196 | } else {
197 | // If there are comments, sort them and put them in the list.
198 | var comments = sortComments(data.comments);
199 | speed = data.comments.length * 100;
200 | appendComments(comments, ul);
201 | ul.data('empty', false);
202 | }
203 | $('#cn' + id).slideUp(speed + 200);
204 | ul.slideDown(speed);
205 | },
206 | error: function(request, textStatus, error) {
207 | showError('Oops, there was a problem retrieving the comments.');
208 | },
209 | dataType: 'json'
210 | });
211 | }
212 |
213 | /**
214 | * Add a comment via ajax and insert the comment into the comment tree.
215 | */
216 | function addComment(form) {
217 | var node_id = form.find('input[name="node"]').val();
218 | var parent_id = form.find('input[name="parent"]').val();
219 | var text = form.find('textarea[name="comment"]').val();
220 | var proposal = form.find('textarea[name="proposal"]').val();
221 |
222 | if (text == '') {
223 | showError('Please enter a comment.');
224 | return;
225 | }
226 |
227 | // Disable the form that is being submitted.
228 | form.find('textarea,input').attr('disabled', 'disabled');
229 |
230 | // Send the comment to the server.
231 | $.ajax({
232 | type: "POST",
233 | url: opts.addCommentURL,
234 | dataType: 'json',
235 | data: {
236 | node: node_id,
237 | parent: parent_id,
238 | text: text,
239 | proposal: proposal
240 | },
241 | success: function(data, textStatus, error) {
242 | // Reset the form.
243 | if (node_id) {
244 | hideProposeChange(node_id);
245 | }
246 | form.find('textarea')
247 | .val('')
248 | .add(form.find('input'))
249 | .removeAttr('disabled');
250 | var ul = $('#cl' + (node_id || parent_id));
251 | if (ul.data('empty')) {
252 | $(ul).empty();
253 | ul.data('empty', false);
254 | }
255 | insertComment(data.comment);
256 | var ao = $('#ao' + node_id);
257 | ao.find('img').attr({'src': opts.commentBrightImage});
258 | if (node_id) {
259 | // if this was a "root" comment, remove the commenting box
260 | // (the user can get it back by reopening the comment popup)
261 | $('#ca' + node_id).slideUp();
262 | }
263 | },
264 | error: function(request, textStatus, error) {
265 | form.find('textarea,input').removeAttr('disabled');
266 | showError('Oops, there was a problem adding the comment.');
267 | }
268 | });
269 | }
270 |
271 | /**
272 | * Recursively append comments to the main comment list and children
273 | * lists, creating the comment tree.
274 | */
275 | function appendComments(comments, ul) {
276 | $.each(comments, function() {
277 | var div = createCommentDiv(this);
278 | ul.append($(document.createElement('li')).html(div));
279 | appendComments(this.children, div.find('ul.comment-children'));
280 | // To avoid stagnating data, don't store the comments children in data.
281 | this.children = null;
282 | div.data('comment', this);
283 | });
284 | }
285 |
286 | /**
287 | * After adding a new comment, it must be inserted in the correct
288 | * location in the comment tree.
289 | */
290 | function insertComment(comment) {
291 | var div = createCommentDiv(comment);
292 |
293 | // To avoid stagnating data, don't store the comments children in data.
294 | comment.children = null;
295 | div.data('comment', comment);
296 |
297 | var ul = $('#cl' + (comment.node || comment.parent));
298 | var siblings = getChildren(ul);
299 |
300 | var li = $(document.createElement('li'));
301 | li.hide();
302 |
303 | // Determine where in the parents children list to insert this comment.
304 | for(i=0; i < siblings.length; i++) {
305 | if (comp(comment, siblings[i]) <= 0) {
306 | $('#cd' + siblings[i].id)
307 | .parent()
308 | .before(li.html(div));
309 | li.slideDown('fast');
310 | return;
311 | }
312 | }
313 |
314 | // If we get here, this comment rates lower than all the others,
315 | // or it is the only comment in the list.
316 | ul.append(li.html(div));
317 | li.slideDown('fast');
318 | }
319 |
320 | function acceptComment(id) {
321 | $.ajax({
322 | type: 'POST',
323 | url: opts.acceptCommentURL,
324 | data: {id: id},
325 | success: function(data, textStatus, request) {
326 | $('#cm' + id).fadeOut('fast');
327 | $('#cd' + id).removeClass('moderate');
328 | },
329 | error: function(request, textStatus, error) {
330 | showError('Oops, there was a problem accepting the comment.');
331 | }
332 | });
333 | }
334 |
335 | function deleteComment(id) {
336 | $.ajax({
337 | type: 'POST',
338 | url: opts.deleteCommentURL,
339 | data: {id: id},
340 | success: function(data, textStatus, request) {
341 | var div = $('#cd' + id);
342 | if (data == 'delete') {
343 | // Moderator mode: remove the comment and all children immediately
344 | div.slideUp('fast', function() {
345 | div.remove();
346 | });
347 | return;
348 | }
349 | // User mode: only mark the comment as deleted
350 | div
351 | .find('span.user-id:first')
352 | .text('[deleted]').end()
353 | .find('div.comment-text:first')
354 | .text('[deleted]').end()
355 | .find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
356 | ', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
357 | .remove();
358 | var comment = div.data('comment');
359 | comment.username = '[deleted]';
360 | comment.text = '[deleted]';
361 | div.data('comment', comment);
362 | },
363 | error: function(request, textStatus, error) {
364 | showError('Oops, there was a problem deleting the comment.');
365 | }
366 | });
367 | }
368 |
369 | function showProposal(id) {
370 | $('#sp' + id).hide();
371 | $('#hp' + id).show();
372 | $('#pr' + id).slideDown('fast');
373 | }
374 |
375 | function hideProposal(id) {
376 | $('#hp' + id).hide();
377 | $('#sp' + id).show();
378 | $('#pr' + id).slideUp('fast');
379 | }
380 |
381 | function showProposeChange(id) {
382 | $('#pc' + id).hide();
383 | $('#hc' + id).show();
384 | var textarea = $('#pt' + id);
385 | textarea.val(textarea.data('source'));
386 | $.fn.autogrow.resize(textarea[0]);
387 | textarea.slideDown('fast');
388 | }
389 |
390 | function hideProposeChange(id) {
391 | $('#hc' + id).hide();
392 | $('#pc' + id).show();
393 | var textarea = $('#pt' + id);
394 | textarea.val('').removeAttr('disabled');
395 | textarea.slideUp('fast');
396 | }
397 |
398 | function toggleCommentMarkupBox(id) {
399 | $('#mb' + id).toggle();
400 | }
401 |
402 | /** Handle when the user clicks on a sort by link. */
403 | function handleReSort(link) {
404 | var classes = link.attr('class').split(/\s+/);
405 | for (var i=0; iThank you! Your comment will show up '
558 | + 'once it is has been approved by a moderator.');
559 | }
560 | // Prettify the comment rating.
561 | comment.pretty_rating = comment.rating + ' point' +
562 | (comment.rating == 1 ? '' : 's');
563 | // Make a class (for displaying not yet moderated comments differently)
564 | comment.css_class = comment.displayed ? '' : ' moderate';
565 | // Create a div for this comment.
566 | var context = $.extend({}, opts, comment);
567 | var div = $(renderTemplate(commentTemplate, context));
568 |
569 | // If the user has voted on this comment, highlight the correct arrow.
570 | if (comment.vote) {
571 | var direction = (comment.vote == 1) ? 'u' : 'd';
572 | div.find('#' + direction + 'v' + comment.id).hide();
573 | div.find('#' + direction + 'u' + comment.id).show();
574 | }
575 |
576 | if (opts.moderator || comment.text != '[deleted]') {
577 | div.find('a.reply').show();
578 | if (comment.proposal_diff)
579 | div.find('#sp' + comment.id).show();
580 | if (opts.moderator && !comment.displayed)
581 | div.find('#cm' + comment.id).show();
582 | if (opts.moderator || (opts.username == comment.username))
583 | div.find('#dc' + comment.id).show();
584 | }
585 | return div;
586 | }
587 |
588 | /**
589 | * A simple template renderer. Placeholders such as <%id%> are replaced
590 | * by context['id'] with items being escaped. Placeholders such as <#id#>
591 | * are not escaped.
592 | */
593 | function renderTemplate(template, context) {
594 | var esc = $(document.createElement('div'));
595 |
596 | function handle(ph, escape) {
597 | var cur = context;
598 | $.each(ph.split('.'), function() {
599 | cur = cur[this];
600 | });
601 | return escape ? esc.text(cur || "").html() : cur;
602 | }
603 |
604 | return template.replace(/<([%#])([\w\.]*)\1>/g, function() {
605 | return handle(arguments[2], arguments[1] == '%' ? true : false);
606 | });
607 | }
608 |
609 | /** Flash an error message briefly. */
610 | function showError(message) {
611 | $(document.createElement('div')).attr({'class': 'popup-error'})
612 | .append($(document.createElement('div'))
613 | .attr({'class': 'error-message'}).text(message))
614 | .appendTo('body')
615 | .fadeIn("slow")
616 | .delay(2000)
617 | .fadeOut("slow");
618 | }
619 |
620 | /** Add a link the user uses to open the comments popup. */
621 | $.fn.comment = function() {
622 | return this.each(function() {
623 | var id = $(this).attr('id').substring(1);
624 | var count = COMMENT_METADATA[id];
625 | var title = count + ' comment' + (count == 1 ? '' : 's');
626 | var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
627 | var addcls = count == 0 ? ' nocomment' : '';
628 | $(this)
629 | .append(
630 | $(document.createElement('a')).attr({
631 | href: '#',
632 | 'class': 'sphinx-comment-open' + addcls,
633 | id: 'ao' + id
634 | })
635 | .append($(document.createElement('img')).attr({
636 | src: image,
637 | alt: 'comment',
638 | title: title
639 | }))
640 | .click(function(event) {
641 | event.preventDefault();
642 | show($(this).attr('id').substring(2));
643 | })
644 | )
645 | .append(
646 | $(document.createElement('a')).attr({
647 | href: '#',
648 | 'class': 'sphinx-comment-close hidden',
649 | id: 'ah' + id
650 | })
651 | .append($(document.createElement('img')).attr({
652 | src: opts.closeCommentImage,
653 | alt: 'close',
654 | title: 'close'
655 | }))
656 | .click(function(event) {
657 | event.preventDefault();
658 | hide($(this).attr('id').substring(2));
659 | })
660 | );
661 | });
662 | };
663 |
664 | var opts = {
665 | processVoteURL: '/_process_vote',
666 | addCommentURL: '/_add_comment',
667 | getCommentsURL: '/_get_comments',
668 | acceptCommentURL: '/_accept_comment',
669 | deleteCommentURL: '/_delete_comment',
670 | commentImage: '/static/_static/comment.png',
671 | closeCommentImage: '/static/_static/comment-close.png',
672 | loadingImage: '/static/_static/ajax-loader.gif',
673 | commentBrightImage: '/static/_static/comment-bright.png',
674 | upArrow: '/static/_static/up.png',
675 | downArrow: '/static/_static/down.png',
676 | upArrowPressed: '/static/_static/up-pressed.png',
677 | downArrowPressed: '/static/_static/down-pressed.png',
678 | voting: false,
679 | moderator: false
680 | };
681 |
682 | if (typeof COMMENT_OPTIONS != "undefined") {
683 | opts = jQuery.extend(opts, COMMENT_OPTIONS);
684 | }
685 |
686 | var popupTemplate = '\
687 |
\ 689 | Sort by:\ 690 | best rated\ 691 | newest\ 692 | oldest\ 693 |
\ 694 |\ 698 |
Add a comment\ 700 | (markup):
\ 701 |``code``, \ 704 | code blocks:::and an indented block after blank line