├── couchapp
│   ├── _id
│   ├── vendor
│   │   └── couchapp
│   │       ├── couchapp.js
│   │       ├── metadata.json
│   │       ├── README.md
│   │       ├── date.js
│   │       ├── template.js
│   │       ├── path.js
│   │       └── _attachments
│   │           └── jquery.couchapp.js
│   ├── couchapp.json
│   ├── .couchapprc
│   ├── fulltext
│   │   └── all
│   │       └── index.js
│   └── _attachments
│       ├── style
│       │   └── main.css
│       ├── index.html
│       └── js
│           ├── crawler.js
│           ├── jquery.url-1.0.js
│           ├── underscore-0.5.2.js
│           ├── jquery.mustache-0.2.1.js
│           └── jquery.query-2.1.7.js
├── python
│   ├── couchcrawler
│   │   ├── __init__.py
│   │   ├── spiders
│   │   │   ├── __init__.py
│   │   │   ├── wiki.py
│   │   │   └── yammer.py
│   │   ├── items.py
│   │   ├── pipelines.py
│   │   └── settings.py
│   ├── scrapy-ctl.py
│   └── couchcrawler-sample.cfg
├── .gitignore
├── LICENSE
└── README.md
/couchapp/_id:
--------------------------------------------------------------------------------
1 | _design/crawler
--------------------------------------------------------------------------------
/python/couchcrawler/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | dropin.cache
3 | python/couchcrawler.cfg
4 |
--------------------------------------------------------------------------------
/couchapp/vendor/couchapp/couchapp.js:
--------------------------------------------------------------------------------
1 | // this stuff should be properly namespaced etc
2 |
--------------------------------------------------------------------------------
/couchapp/couchapp.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Name of your CouchApp",
3 | "description": "CouchApp"
4 | }
--------------------------------------------------------------------------------
/couchapp/vendor/couchapp/metadata.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "couchapp",
3 | "description": "Official couchapp vendor."
4 | }
--------------------------------------------------------------------------------
/couchapp/.couchapprc:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "default": {
4 | "db": "http://localhost:5984/crawler"
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/couchapp/vendor/couchapp/README.md:
--------------------------------------------------------------------------------
1 | ## CouchApp - more than just a filesystem mapper
2 |
3 | This is where documentation will go for the client and server JavaScript parts of CouchApp.
--------------------------------------------------------------------------------
/python/scrapy-ctl.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | os.environ.setdefault('SCRAPY_SETTINGS_MODULE', 'couchcrawler.settings')
5 |
6 | from scrapy.command.cmdline import execute
7 | execute()
8 |
--------------------------------------------------------------------------------
/python/couchcrawler/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # To create the first spider for your project use this command:
4 | #
5 | # scrapy-ctl.py genspider myspider myspider-domain.com
6 | #
7 | # For more info see:
8 | # http://doc.scrapy.org/topics/spiders.html
9 |
10 |
--------------------------------------------------------------------------------
/python/couchcrawler-sample.cfg:
--------------------------------------------------------------------------------
1 | [couchdb]
2 | host=http://localhost:5984
3 | db=crawler
4 |
5 | [wiki]
6 | domain_name=mydomain.com
7 | extra_domain_names=myotherdomain.com,yetanotherdomain.net
8 | start_urls=http://mydomain.com/wiki/index.php/Special:RecentChanges,http://yetanotherdomain.net/bin/view/Main/WebChanges?limit=100
9 |
10 | [yammer]
11 | json_messages_url=https://www.yammer.com/api/v1/messages.json
12 | consumer_key=my_consumer_key
13 | consumer_secret=my_consumer_secret
14 | app_token=my_app_token
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/python/couchcrawler/items.py:
--------------------------------------------------------------------------------
1 | # Define here the models for your scraped items
2 | #
3 | # See documentation in:
4 | # http://doc.scrapy.org/topics/items.html
5 |
6 | from scrapy.item import Item, Field
7 |
 8 | class IndexableItem(Item):
 9 |     url = Field()
10 |     contents = Field()
11 |     title = Field()
12 |     mod_datetime = Field()
13 |     type = Field()
14 |
15 | class YammerItem(IndexableItem):
16 |     url = Field()
17 |     contents = Field()
18 |     title = Field()
19 |     mod_datetime = Field()
20 |     type = Field()
21 |
22 |     author = Field()
23 |     parent_url = Field()
24 |     thread_url = Field()
25 |     likes = Field()
26 |
--------------------------------------------------------------------------------
/python/couchcrawler/pipelines.py:
--------------------------------------------------------------------------------
1 | # Define your item pipelines here
2 | #
3 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
4 | # See: http://doc.scrapy.org/topics/item-pipeline.html
5 |
6 | from couchdb.client import Server, ResourceNotFound
7 | from couchcrawler.settings import config
8 |
 9 | class CouchCrawlerPipeline(object):
10 |     def __init__(self):
11 |         server = Server(config.get('couchdb', 'host'))
12 |         self.conn = server[config.get('couchdb', 'db')]
13 |
14 |     def process_item(self, domain, item):
15 |         url = item['url']
16 |
17 |         try:
18 |             # update an existing document in place; docs are keyed by URL
19 |             doc = self.conn[url]
20 |             for key, val in item.iteritems():
21 |                 doc[key] = val
22 |             # write the updated document back, or the changes are lost
23 |             self.conn[url] = doc
24 |         except ResourceNotFound:
25 |             self.conn[url] = dict(item)
26 |
27 |         return item
28 |
--------------------------------------------------------------------------------
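
A note on the pipeline above: because documents are keyed by the item's URL, a crawled page can be looked up directly once indexing has run. A minimal check with couchdb-python, assuming the default host and database names from couchcrawler-sample.cfg (the page URL below is hypothetical):

    from couchdb.client import Server

    # connect to the same database the pipeline writes to
    db = Server('http://localhost:5984')['crawler']

    # documents are keyed by the crawled page's URL (hypothetical example)
    doc = db['http://mydomain.com/wiki/index.php/Main_Page']
    print doc['title']
    print doc['mod_datetime']

Re-crawling the same URL updates this document in place rather than creating a duplicate, so couchdb-lucene keeps a single index entry per page.
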
/python/couchcrawler/settings.py:
--------------------------------------------------------------------------------
1 | # Scrapy settings for couchcrawler project
2 | #
3 | # For simplicity, this file contains only the most important settings by
4 | # default. All the other settings are documented here:
5 | #
6 | # http://doc.scrapy.org/topics/settings.html
7 | #
8 | # Or you can copy and paste them from where they're defined in Scrapy:
9 | #
10 | # scrapy/conf/default_settings.py
11 | #
12 |
13 | BOT_NAME = 'couchcrawler'
14 | BOT_VERSION = '1.0'
15 |
16 | SPIDER_MODULES = ['couchcrawler.spiders']
17 | NEWSPIDER_MODULE = 'couchcrawler.spiders'
18 | DEFAULT_ITEM_CLASS = 'couchcrawler.items.IndexableItem'
19 | USER_AGENT = '%s/%s' % (BOT_NAME, BOT_VERSION)
20 | ITEM_PIPELINES = [
21 |     'couchcrawler.pipelines.CouchCrawlerPipeline'
22 | ]
23 |
24 | from ConfigParser import ConfigParser
25 | config = ConfigParser()
26 | config.read(['couchcrawler.cfg'])
27 |
28 |
--------------------------------------------------------------------------------
/couchapp/vendor/couchapp/date.js:
--------------------------------------------------------------------------------
1 | function f(n) { // Format integers to have at least two digits.
2 | return n < 10 ? '0' + n : n;
3 | }
4 |
5 | Date.prototype.rfc3339 = function() {
6 | return this.getUTCFullYear() + '-' +
7 | f(this.getUTCMonth() + 1) + '-' +
8 | f(this.getUTCDate()) + 'T' +
9 | f(this.getUTCHours()) + ':' +
10 | f(this.getUTCMinutes()) + ':' +
11 | f(this.getUTCSeconds()) + 'Z';
12 | };
13 |
14 | // This is a format that collates in order and tends to work with
15 | // JavaScript's new Date(string) date parsing capabilities, unlike rfc3339.
16 | Date.prototype.toJSON = function() {
17 | return this.getUTCFullYear() + '/' +
18 | f(this.getUTCMonth() + 1) + '/' +
19 | f(this.getUTCDate()) + ' ' +
20 | f(this.getUTCHours()) + ':' +
21 | f(this.getUTCMinutes()) + ':' +
22 | f(this.getUTCSeconds()) + ' +0000';
23 | };
--------------------------------------------------------------------------------
/couchapp/fulltext/all/index.js:
--------------------------------------------------------------------------------
1 | function(doc) {
2 | var output = new Document();
3 | var valid_fields = {
4 | mod_datetime: {
5 | field: "mod_datetime",
6 | store: "yes",
7 | index: "analyzed"
8 | },
9 |
10 | title: {
11 | field: "title",
12 | store: "yes",
13 | index: "analyzed"
14 | },
15 |
16 | url: {
17 | field: "url",
18 | store: "yes",
19 | index: "analyzed"
20 | },
21 |
22 | contents: {
23 | field: "contents",
24 | store: "no",
25 | index: "analyzed"
26 | }
27 | };
28 |
29 | for (var field in valid_fields) {
30 | output.add(doc[field], valid_fields[field]);
31 | }
32 |
33 | output.add(
34 | doc["title"] + "\n" + doc["contents"],
35 | {field: "default", store: "no", index: "analyzed"}
36 | );
37 |
38 | output.add(doc["contents"].substr(0, 140), {field: "snippet", store: "yes", index: "no"});
39 |
40 | return output;
41 | }
42 |
--------------------------------------------------------------------------------
/couchapp/_attachments/style/main.css:
--------------------------------------------------------------------------------
1 | * {
2 | font-family: arial;
3 | }
4 |
5 | hr {
6 | border: 1px solid #ddd;
7 | }
8 |
9 | #search_form input {
10 | font-size: 1.125em;
11 | }
12 |
13 | #input_q {
14 | width: 60%;
15 | }
16 |
17 | #search_results_metadata {
18 | font-size: .9em;
19 | margin-top:1em;
20 | margin-bottom:1em;
21 | }
22 |
23 | #search_results {
24 | width: 600px;
25 | margin-left: 25px;
26 | }
27 |
28 | .search_result {
29 | display: block;
30 | margin-top: 1em;
31 | margin-bottom: 1em;
32 | font-size: 0.9em;
33 | }
34 |
35 | .search_result h1 {
36 | font-size: 1.1em;
37 | font-weight: bold;
38 | white-space: nowrap;
39 | }
40 |
41 | .search_result header * {
42 | padding: 0em;
43 | margin: 0em;
44 | }
45 |
46 | .search_result p {
47 | padding: 0em;
48 | margin-top: 0.25em;
49 | margin-bottom: 0.25em;
50 | }
51 |
52 | .search_result footer {
53 | color: green;
54 | white-space: nowrap;
55 | }
56 |
57 | #search_pager {
58 | display: block;
59 | font-size:1em;
60 | width: 600px;
61 | text-align: center;
62 | margin-left: 25px;
63 | padding-top: 10px;
64 | padding-bottom: 100px;
65 | }
66 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/couchapp/vendor/couchapp/template.js:
--------------------------------------------------------------------------------
1 | // Simple JavaScript Templating
2 | // John Resig - http://ejohn.org/ - MIT Licensed
3 | var cache = {};
4 |
5 | function template(str, data){
6 | // Figure out if we're getting a template, or if we need to
7 | // load the template - and be sure to cache the result.
8 | var fn = cache[str] ||
9 |
10 | // Generate a reusable function that will serve as a template
11 | // generator (and which will be cached).
12 | new Function("obj",
13 | "var p=[],print=function(){p.push.apply(p,arguments);};" +
14 |
15 | // Introduce the data as local variables using with(){}
16 | "with(obj){p.push('" +
17 |
18 | // Convert the template into pure JavaScript
19 | str
20 | .replace(/\n/g, "\\n")
21 | .replace(/[\r\t]/g, " ")
22 | .replace(/'(?=[^%]*%>)/g,"\t")
23 | .split("'").join("\\'")
24 | .split("\t").join("'")
25 | .replace(/<%=(.+?)%>/g, "',$1,'")
26 | .split("<%").join("');")
27 | .split("%>").join("p.push('")
28 | + "');}return p.join('');");
29 | cache[str] = fn;
30 |
31 | // Provide some basic currying to the user
32 | return data ? fn( data ) : fn;
33 | };
--------------------------------------------------------------------------------
/couchapp/_attachments/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Search Engine!
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | Search Engine!
18 |
19 |
27 |
28 |
29 |
30 |
31 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/python/couchcrawler/spiders/wiki.py:
--------------------------------------------------------------------------------
1 |
2 | from datetime import datetime
3 |
4 | from scrapy.contrib.spiders import CrawlSpider, Rule
5 | from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
6 | from scrapy.selector import HtmlXPathSelector
7 |
8 | from couchcrawler.items import IndexableItem
9 |
10 | from couchcrawler.settings import config
11 |
12 | class WikiSpider(CrawlSpider):
13 |     ''' Can spider MediaWiki and Twiki wikis.
14 |     '''
15 |
16 |     domain_name = config.get('wiki', 'domain_name')
17 |     extra_domain_names = config.get('wiki', 'extra_domain_names').split(',')
18 |     start_urls = config.get('wiki', 'start_urls').split(',')
19 |
20 |     rules = [
21 |         Rule(SgmlLinkExtractor(deny=["\?", "Special:.*"]),
22 |              callback='parse_wiki',
23 |              follow=True),
24 |     ]
25 |
26 |     def parse_wiki(self, response):
27 |         hxs = HtmlXPathSelector(response)
28 |
29 |         item = IndexableItem()
30 |         item['type'] = 'wiki'
31 |         item['title'] = hxs.select('//title/text()').extract()[0]
32 |         item['url'] = response.url
33 |         item['mod_datetime'] = datetime.now().isoformat()
34 |
35 |         to_exclude = ["[not(self::%s)]" % x for x in ["script", "style"]]
36 |         content_xpath = "//body//*%s/text()" % ("".join(to_exclude))
37 |         item['contents'] = "\n".join([
38 |             s.strip() for s
39 |             in hxs.select(content_xpath).extract()
40 |             if s.strip()
41 |         ])
42 |
43 |         return item
44 |
45 | SPIDER = WikiSpider()
46 |
--------------------------------------------------------------------------------
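
The README notes that adding new spiders is straightforward: settings.py already points SPIDER_MODULES at couchcrawler.spiders, so any module dropped into that package is picked up. A minimal sketch in the same pre-1.0 Scrapy style as wiki.py above (the blog site, XPath, and spider name are hypothetical):

    from datetime import datetime

    from scrapy.spider import BaseSpider
    from scrapy.selector import HtmlXPathSelector

    from couchcrawler.items import IndexableItem

    class BlogSpider(BaseSpider):
        # a real spider would read these from couchcrawler.cfg, as wiki.py does
        domain_name = 'blog.example.com'
        start_urls = ['http://blog.example.com/archive']

        def parse(self, response):
            hxs = HtmlXPathSelector(response)

            item = IndexableItem()
            item['type'] = 'blog'
            item['title'] = hxs.select('//title/text()').extract()[0]
            item['url'] = response.url
            item['mod_datetime'] = datetime.now().isoformat()
            item['contents'] = "\n".join(
                s.strip() for s
                in hxs.select('//body//text()').extract()
                if s.strip())

            return [item]

    # the pre-1.0 Scrapy releases this project targets discover spiders
    # through a module-level SPIDER instance
    SPIDER = BlogSpider()

It would then be run like the others: ./scrapy-ctl.py crawl blog.example.com.
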
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2010, Carlo Cabanilla
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 | * Redistributions of source code must retain the above copyright
7 | notice, this list of conditions and the following disclaimer.
8 | * Redistributions in binary form must reproduce the above copyright
9 | notice, this list of conditions and the following disclaimer in the
10 | documentation and/or other materials provided with the distribution.
11 | * Neither the name of the author nor the
12 | names of its contributors may be used to endorse or promote products
13 | derived from this software without specific prior written permission.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ''AS IS''
16 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Couch Crawler
2 | =============
3 |
4 | A search engine built on top of couchdb-lucene.
5 |
6 | Dependencies
7 | ------------
8 | [CouchDB](http://couchdb.apache.org/)
9 |
10 | * [couchdb-lucene v0.4](http://github.com/rnewson/couchdb-lucene/tree/v0.4)
11 | * [couchapp](http://github.com/couchapp/couchapp)
12 |
13 | [Python](http://www.python.org/)
14 |
15 | * [couchdb-python](http://code.google.com/p/couchdb-python/)
16 | * [scrapy](http://scrapy.org/)
17 |
18 | Optionally for [Yammer](http://yammer.com) spidering:
19 |
20 | * [pyopenssl](http://pypi.python.org/pypi/pyOpenSSL)
21 | * [oauth](http://code.google.com/p/oauth/)
22 |
23 | Installation
24 | ------------
25 |
26 | Assuming couchdb-lucene is installed at the "_fti" endpoint, you can push
27 | Couch Crawler to your CouchDB instance with the following commands:
28 |
29 | cd couchapp
30 | couchapp push
31 |
32 | This creates a new CouchDB database called "crawler" on the CouchDB instance
33 | at localhost:5984. To target a different database, edit couchapp/.couchapprc and
34 | run couchapp push again.
35 |
36 | To configure the crawler, copy python/couchcrawler-sample.cfg to python/couchcrawler.cfg and fill out the appropriate configuration values.
37 |
38 | To start indexing pages, run the crawler script:
39 |
40 | cd python
41 | ./scrapy-ctl.py crawl domain_to_crawl.com
42 |
43 | While it's indexing, you can visit the search engine at the following url:
44 |
45 | http://localhost:5984/crawler/_design/crawler/index.html
46 |
47 | Spiders
48 | -------
49 | The crawler currently has spiders for:
50 |
51 | * MediaWiki
52 | * Twiki
53 | * Yammer
54 |
55 | It's pretty easy to create your own. See python/couchcrawler/spiders/wiki.py for an example, or the [Scrapy documentation](http://doc.scrapy.org/intro/tutorial.html) for a more in-depth explanation.
56 |
57 |
--------------------------------------------------------------------------------
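
The search page itself is a thin client over couchdb-lucene: crawler.js sends the query string to /crawler/_fti/crawler/all and renders the stored fields of each row. The same index can be queried directly; a minimal sketch, assuming the default database from .couchapprc and couchdb-lucene v0.4 mounted at _fti as described above (the query term is made up):

    import json
    import urllib
    import urllib2

    # the endpoint crawler.js calls: /<db>/_fti/<design doc>/<index name>
    params = urllib.urlencode({'q': 'couchdb', 'limit': 10, 'skip': 0})
    url = 'http://localhost:5984/crawler/_fti/crawler/all?' + params

    results = json.load(urllib2.urlopen(url))
    print results['total_rows']
    for row in results['rows']:
        fields = row['fields']
        print fields['title'], fields['url']

Only title, url, mod_datetime, and snippet come back in each row's fields, because fulltext/all/index.js stores those and indexes contents without storing it.
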
/couchapp/vendor/couchapp/path.js:
--------------------------------------------------------------------------------
1 | // from couch.js
2 | function encodeOptions(options, noJson) {
3 | var buf = []
4 | if (typeof(options) == "object" && options !== null) {
5 | for (var name in options) {
6 | if (!options.hasOwnProperty(name)) continue;
7 | var value = options[name];
8 | if (!noJson && (name == "key" || name == "startkey" || name == "endkey")) {
9 | value = toJSON(value);
10 | }
11 | buf.push(encodeURIComponent(name) + "=" + encodeURIComponent(value));
12 | }
13 | }
14 | if (!buf.length) {
15 | return "";
16 | }
17 | return "?" + buf.join("&");
18 | }
19 |
20 | function concatArgs(array, args) {
21 | for (var i=0; i < args.length; i++) {
22 | array.push(args[i]);
23 | };
24 | return array;
25 | };
26 |
27 | function makePath(array) {
28 | var options, path;
29 |
30 | if (typeof array[array.length - 1] != "string") {
31 | // it's a params hash
32 | options = array.pop();
33 | }
34 | path = array.map(function(item) {return encodeURIComponent(item)}).join('/');
35 | if (options) {
36 | return path + encodeOptions(options);
37 | } else {
38 | return path;
39 | }
40 | };
41 |
42 | function assetPath() {
43 | var p = req.path, parts = ['', p[0], p[1] , p[2]];
44 | return makePath(concatArgs(parts, arguments));
45 | };
46 |
47 | function showPath() {
48 | var p = req.path, parts = ['', p[0], p[1] , p[2], '_show'];
49 | return makePath(concatArgs(parts, arguments));
50 | };
51 |
52 | function listPath() {
53 | var p = req.path, parts = ['', p[0], p[1] , p[2], '_list'];
54 | return makePath(concatArgs(parts, arguments));
55 | };
56 |
57 | function olderPath(info) {
58 | if (!info) return null;
59 | var q = req.query;
60 | q.startkey = info.prev_key;
61 | q.skip=1;
62 | return listPath('index','recent-posts',q);
63 | }
64 |
65 | function makeAbsolute(req, path) {
66 | return 'http://' + req.headers.Host + path;
67 | }
68 |
69 |
70 | function currentPath() {
71 | path = req.path.map(function(item) {return encodeURIComponent(item)}).join('/');
72 | if (req.query) {
73 | return path + encodeOptions(req.query, true);
74 | } else {
75 | return path;
76 | }
77 | }
--------------------------------------------------------------------------------
/python/couchcrawler/spiders/yammer.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from oauth.oauth import OAuthConsumer, OAuthSignatureMethod_PLAINTEXT, OAuthRequest, OAuthToken
4 |
5 | from scrapy.spider import BaseSpider
6 | from scrapy.http import Request
7 |
8 | from couchcrawler.items import YammerItem
9 | from couchcrawler.settings import config
10 |
11 | class YammerSpider(BaseSpider):
12 |     domain_name = 'yammer.com'
13 |     start_urls = [config.get('yammer', 'json_messages_url')]
14 |
15 |     def __init__(self, domain_name=None):
16 |         BaseSpider.__init__(self, domain_name)
17 |
18 |         consumer_key = config.get('yammer', 'consumer_key')
19 |         consumer_secret = config.get('yammer', 'consumer_secret')
20 |         app_token = config.get('yammer', 'app_token')
21 |
22 |         self.consumer = OAuthConsumer(consumer_key, consumer_secret)
23 |         self.signature = OAuthSignatureMethod_PLAINTEXT()
24 |         self.token = OAuthToken.from_string(app_token)
25 |
26 |     def make_requests_from_url(self, url):
27 |         oauth_request = OAuthRequest.from_consumer_and_token(
28 |             self.consumer,
29 |             token=self.token,
30 |             http_method='GET',
31 |             http_url=url)
32 |         oauth_request.sign_request(self.signature, self.consumer, self.token)
33 |
34 |         return Request(oauth_request.to_url(), callback=self.parse, dont_filter=True)
35 |
36 |     def parse(self, response):
37 |         data = json.loads(response.body)
38 |
39 |         for message in data['messages']:
40 |             item = YammerItem()
41 |             item['type'] = 'yammer'
42 |             item['url'] = message['web_url']
43 |
44 |             body = message['body']['plain']
45 |             item['contents'] = body
46 |
47 |             max_title_length = 40
48 |             if len(body) > 40:
49 |                 item['title'] = body[0:max_title_length - 3] + '...'
50 |             else:
51 |                 item['title'] = body
52 |
53 |             item['mod_datetime'] = message['created_at']
54 |             item['author'] = message['sender_id']
55 |             item['parent_url'] = message['replied_to_id']
56 |             item['thread_url'] = message['thread_id']
57 |             item['likes'] = message['liked_by']
58 |
59 |             yield item
60 |
61 | SPIDER = YammerSpider()
62 |
--------------------------------------------------------------------------------
/couchapp/_attachments/js/crawler.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 | var q = $.query.get('q');
3 | // kind of a hack to handle the case of "?q=", which sets q to the boolean true
4 | var q_is_not_a_bool_true = (+q != true);
5 |
6 | if (q && q_is_not_a_bool_true) {
7 | $('#input_q').attr('value', q);
8 |
9 | var url = $.mustache(
10 | '/{{db}}/_fti/crawler/all{{query_string}}',
11 | {
12 | db: jQuery.url.segment(0),
13 | query_string: $.query.toString()
14 | }
15 | );
16 |
17 | $.getJSON(
18 | url,
19 | function(search_results) {
20 | $("#search_results_metadata").html(crawler.search_results_metadata(search_results));
21 | $("#search_results").html(crawler.search_results(search_results));
22 | $("#search_pager").html(crawler.search_pager(search_results, $.query));
23 | });
24 | }
25 | });
26 |
27 |
28 | var crawler = {
29 | search_results_metadata: function(data) {
30 | return $.mustache(
31 | "Results {{from}} - {{to}} of {{total_rows}} ({{duration}} seconds)",
32 | {
33 | from: data.skip + 1,
34 | to: Math.min(data.skip + data.limit, data.total_rows),
35 | total_rows: data.total_rows,
36 | duration: ((data.search_duration
37 | + data.fetch_duration) / 1000).toFixed(3)
38 | }
39 | )
40 | },
41 |
42 | search_results: function(data) {
43 | return $.mustache(
44 | [
45 | '{{#rows}}',
46 | '<div class="search_result">', // one result block, styled by the .search_result rules in style/main.css
47 | '<header><h1><a href="{{url}}">{{title}}</a></h1></header>',
48 | '<p>{{snippet}}</p>',
49 | '<footer>{{url}}</footer>',
50 | '</div>',
51 | '{{/rows}}'
52 | ].join("\n"),
53 | {rows: _(data.rows).map( function(row) { return row.fields } )}
54 | )
55 | },
56 |
57 | search_pager: function(data, search_query) {
58 | var num_pages = Math.ceil(data.total_rows / data.limit);
59 |
60 | if (num_pages == 1) {
61 | return '';
62 | }
63 |
64 | var current_page = parseInt(data.skip / data.limit) + 1;
65 |
66 | return _(_.range(num_pages)).map(function(i) {
67 | var page_num = i + 1;
68 |
69 | if (page_num == current_page) {
70 | return page_num;
71 |
72 | } else {
73 | var skip = i * search_query.get('limit');
74 | return $.mustache(
75 | '<a href="{{query}}">{{page_num}}</a>',
76 | {
77 | "page_num": page_num,
78 | "query": search_query.set('skip', skip).toString()
79 | }
80 | );
81 |
82 | }
83 | }).join(" | ")
84 | }
85 | }
86 |
87 |
88 |
89 |
--------------------------------------------------------------------------------
/couchapp/_attachments/js/jquery.url-1.0.js:
--------------------------------------------------------------------------------
1 | /* ===========================================================================
2 | *
3 | * JQuery URL Parser
4 | * Version 1.0
5 | * Parses URLs and provides easy access to information within them.
6 | *
7 | * Author: Mark Perkins
8 | * Author email: mark@allmarkedup.com
9 | *
10 | * For full documentation and more go to http://projects.allmarkedup.com/jquery_url_parser/
11 | *
12 | * ---------------------------------------------------------------------------
13 | *
14 | * CREDITS:
15 | *
16 | * Parser based on the Regex-based URI parser by Steven Levithan.
17 |  * For more information (including a detailed explanation of the differences
18 |  * between the 'loose' and 'strict' parsing modes) visit http://blog.stevenlevithan.com/archives/parseuri
19 | *
20 | * ---------------------------------------------------------------------------
21 | *
22 | * LICENCE:
23 | *
24 | * Released under a MIT Licence. See licence.txt that should have been supplied with this file,
25 | * or visit http://projects.allmarkedup.com/jquery_url_parser/licence.txt
26 | *
27 | * ---------------------------------------------------------------------------
28 | *
29 | * EXAMPLES OF USE:
30 | *
31 | * Get the domain name (host) from the current page URL
32 | * jQuery.url.attr("host")
33 | *
34 | * Get the query string value for 'item' for the current page
35 | * jQuery.url.param("item") // null if it doesn't exist
36 | *
37 | * Get the second segment of the URI of the current page
38 | * jQuery.url.segment(2) // null if it doesn't exist
39 | *
40 | * Get the protocol of a manually passed in URL
41 | * jQuery.url.setUrl("http://allmarkedup.com/").attr("protocol") // returns 'http'
42 | *
43 | */
44 |
45 | jQuery.url = function()
46 | {
47 | var segments = {};
48 |
49 | var parsed = {};
50 |
51 | /**
52 | * Options object. Only the URI and strictMode values can be changed via the setters below.
53 | */
54 | var options = {
55 |
56 | url : window.location, // default URI is the page in which the script is running
57 |
58 | strictMode: false, // 'loose' parsing by default
59 |
60 | key: ["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"], // keys available to query
61 |
62 | q: {
63 | name: "queryKey",
64 | parser: /(?:^|&)([^&=]*)=?([^&]*)/g
65 | },
66 |
67 | parser: {
68 | strict: /^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*):?([^:@]*))?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/, //less intuitive, more accurate to the specs
69 | loose: /^(?:(?![^:@]+:[^:@\/]*@)([^:\/?#.]+):)?(?:\/\/)?((?:(([^:@]*):?([^:@]*))?@)?([^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/ // more intuitive, fails on relative paths and deviates from specs
70 | }
71 |
72 | };
73 |
74 | /**
75 | * Deals with the parsing of the URI according to the regex above.
76 | * Written by Steven Levithan - see credits at top.
77 | */
78 | var parseUri = function()
79 | {
80 | str = decodeURI( options.url );
81 |
82 | var m = options.parser[ options.strictMode ? "strict" : "loose" ].exec( str );
83 | var uri = {};
84 | var i = 14;
85 |
86 | while ( i-- ) {
87 | uri[ options.key[i] ] = m[i] || "";
88 | }
89 |
90 | uri[ options.q.name ] = {};
91 | uri[ options.key[12] ].replace( options.q.parser, function ( $0, $1, $2 ) {
92 | if ($1) {
93 | uri[options.q.name][$1] = $2;
94 | }
95 | });
96 |
97 | return uri;
98 | };
99 |
100 | /**
101 | * Returns the value of the passed in key from the parsed URI.
102 | *
103 | * @param string key The key whose value is required
104 | */
105 | var key = function( key )
106 | {
107 | if ( ! parsed.length )
108 | {
109 | setUp(); // if the URI has not been parsed yet then do this first...
110 | }
111 | if ( key == "base" )
112 | {
113 | if ( parsed.port !== null && parsed.port !== "" )
114 | {
115 | return parsed.protocol+"://"+parsed.host+":"+parsed.port+"/";
116 | }
117 | else
118 | {
119 | return parsed.protocol+"://"+parsed.host+"/";
120 | }
121 | }
122 |
123 | return ( parsed[key] === "" ) ? null : parsed[key];
124 | };
125 |
126 | /**
127 | * Returns the value of the required query string parameter.
128 | *
129 | * @param string item The parameter whose value is required
130 | */
131 | var param = function( item )
132 | {
133 | if ( ! parsed.length )
134 | {
135 | setUp(); // if the URI has not been parsed yet then do this first...
136 | }
137 | return ( parsed.queryKey[item] === null ) ? null : parsed.queryKey[item];
138 | };
139 |
140 | /**
141 | * 'Constructor' (not really!) function.
142 | * Called whenever the URI changes to kick off re-parsing of the URI and splitting it up into segments.
143 | */
144 | var setUp = function()
145 | {
146 | parsed = parseUri();
147 |
148 | getSegments();
149 | };
150 |
151 | /**
152 | * Splits up the body of the URI into segments (i.e. sections delimited by '/')
153 | */
154 | var getSegments = function()
155 | {
156 | var p = parsed.path;
157 | segments = []; // clear out segments array
158 | segments = parsed.path.length == 1 ? {} : ( p.charAt( p.length - 1 ) == "/" ? p.substring( 1, p.length - 1 ) : path = p.substring( 1 ) ).split("/");
159 | };
160 |
161 | return {
162 |
163 | /**
164 | * Sets the parsing mode - either strict or loose. Set to loose by default.
165 | *
166 | * @param string mode The mode to set the parser to. Anything apart from a value of 'strict' will set it to loose!
167 | */
168 | setMode : function( mode )
169 | {
170 | strictMode = mode == "strict" ? true : false;
171 | return this;
172 | },
173 |
174 | /**
175 |  * Sets URI to parse if you don't want to parse the current page's URI.
176 | * Calling the function with no value for newUri resets it to the current page's URI.
177 | *
178 | * @param string newUri The URI to parse.
179 | */
180 | setUrl : function( newUri )
181 | {
182 | options.url = newUri === undefined ? window.location : newUri;
183 | setUp();
184 | return this;
185 | },
186 |
187 | /**
188 | * Returns the value of the specified URI segment. Segments are numbered from 1 to the number of segments.
189 | * For example the URI http://test.com/about/company/ segment(1) would return 'about'.
190 | *
191 | * If no integer is passed into the function it returns the number of segments in the URI.
192 | *
193 | * @param int pos The position of the segment to return. Can be empty.
194 | */
195 | segment : function( pos )
196 | {
197 | if ( ! parsed.length )
198 | {
199 | setUp(); // if the URI has not been parsed yet then do this first...
200 | }
201 | if ( pos === undefined )
202 | {
203 | return segments.length;
204 | }
205 | return ( segments[pos] === "" || segments[pos] === undefined ) ? null : segments[pos];
206 | },
207 |
208 | attr : key, // provides public access to private 'key' function - see above
209 |
210 | param : param // provides public access to private 'param' function - see above
211 |
212 | };
213 |
214 | }();
--------------------------------------------------------------------------------
/couchapp/vendor/couchapp/_attachments/jquery.couchapp.js:
--------------------------------------------------------------------------------
1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not
2 | // use this file except in compliance with the License. You may obtain a copy
3 | // of the License at
4 | //
5 | // http://www.apache.org/licenses/LICENSE-2.0
6 | //
7 | // Unless required by applicable law or agreed to in writing, software
8 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10 | // License for the specific language governing permissions and limitations under
11 | // the License.
12 |
13 | // Usage: The passed in function is called when the page is ready.
14 | // CouchApp passes in the app object, which takes care of linking to
15 | // the proper database, and provides access to the CouchApp helpers.
16 | // $.CouchApp(function(app) {
17 | // app.db.view(...)
18 | // ...
19 | // });
20 |
21 | (function($) {
22 |
23 | function f(n) { // Format integers to have at least two digits.
24 | return n < 10 ? '0' + n : n;
25 | }
26 |
27 | Date.prototype.toJSON = function() {
28 | return this.getUTCFullYear() + '/' +
29 | f(this.getUTCMonth() + 1) + '/' +
30 | f(this.getUTCDate()) + ' ' +
31 | f(this.getUTCHours()) + ':' +
32 | f(this.getUTCMinutes()) + ':' +
33 | f(this.getUTCSeconds()) + ' +0000';
34 | };
35 |
36 | function Design(db, name) {
37 | this.view = function(view, opts) {
38 | db.view(name+'/'+view, opts);
39 | };
40 | };
41 |
42 | var login;
43 |
44 | function init(app) {
45 | $(function() {
46 | var dbname = document.location.href.split('/')[3];
47 | var dname = unescape(document.location.href).split('/')[5];
48 | var db = $.couch.db(dbname);
49 | var design = new Design(db, dname);
50 |
51 | // docForm applies CouchDB behavior to HTML forms.
52 | function docForm(formSelector, opts) {
53 | var localFormDoc = {};
54 | opts = opts || {};
55 | opts.fields = opts.fields || [];
56 |
57 | // turn the form into deep json
58 | // field names like 'author-email' get turned into json like
59 | // {"author":{"email":"quentin@example.com"}}
60 | function formToDeepJSON(form, fields, doc) {
61 | var form = $(form);
62 | opts.fields.forEach(function(field) {
63 | var val = form.find("[name="+field+"]").val()
64 | if (!val) return;
65 | var parts = field.split('-');
66 | var frontObj = doc, frontName = parts.shift();
67 | while (parts.length > 0) {
68 | frontObj[frontName] = frontObj[frontName] || {}
69 | frontObj = frontObj[frontName];
70 | frontName = parts.shift();
71 | }
72 | frontObj[frontName] = val;
73 | });
74 | };
75 |
76 | // Apply the behavior
77 | $(formSelector).submit(function(e) {
78 | e.preventDefault();
79 | // formToDeepJSON acts on localFormDoc by reference
80 | formToDeepJSON(this, opts.fields, localFormDoc);
81 | if (opts.beforeSave) opts.beforeSave(localFormDoc);
82 | db.saveDoc(localFormDoc, {
83 | success : function(resp) {
84 | if (opts.success) opts.success(resp, localFormDoc);
85 | }
86 | })
87 |
88 | return false;
89 | });
90 |
91 | // populate form from an existing doc
92 | function docToForm(doc) {
93 | var form = $(formSelector);
94 | // fills in forms
95 | opts.fields.forEach(function(field) {
96 | var parts = field.split('-');
97 | var run = true, frontObj = doc, frontName = parts.shift();
98 | while (frontObj && parts.length > 0) {
99 | frontObj = frontObj[frontName];
100 | frontName = parts.shift();
101 | }
102 | if (frontObj && frontObj[frontName])
103 | form.find("[name="+field+"]").val(frontObj[frontName]);
104 | });
105 | };
106 |
107 | if (opts.id) {
108 | db.openDoc(opts.id, {
109 | success: function(doc) {
110 | if (opts.onLoad) opts.onLoad(doc);
111 | localFormDoc = doc;
112 | docToForm(doc);
113 | }});
114 | } else if (opts.template) {
115 | if (opts.onLoad) opts.onLoad(opts.template);
116 | localFormDoc = opts.template;
117 | docToForm(localFormDoc);
118 | }
119 | var instance = {
120 | deleteDoc : function(opts) {
121 | opts = opts || {};
122 | if (confirm("Really delete this document?")) {
123 | db.removeDoc(localFormDoc, opts);
124 | }
125 | },
126 | localDoc : function() {
127 | formToDeepJSON(formSelector, opts.fields, localFormDoc);
128 | return localFormDoc;
129 | }
130 | }
131 | return instance;
132 | }
133 |
134 | function prettyDate(time){
135 | var date = new Date(time),
136 | diff = (((new Date()).getTime() - date.getTime()) / 1000),
137 | day_diff = Math.floor(diff / 86400);
138 |
139 | // if ( isNaN(day_diff) || day_diff < 0 || day_diff >= 31 ) return;
140 |
141 | return day_diff < 1 && (
142 | diff < 60 && "just now" ||
143 | diff < 120 && "1 minute ago" ||
144 | diff < 3600 && Math.floor( diff / 60 ) + " minutes ago" ||
145 | diff < 7200 && "1 hour ago" ||
146 | diff < 86400 && Math.floor( diff / 3600 ) + " hours ago") ||
147 | day_diff == 1 && "yesterday" ||
148 | day_diff < 21 && day_diff + " days ago" ||
149 | day_diff < 45 && Math.ceil( day_diff / 7 ) + " weeks ago" ||
150 | day_diff < 730 && Math.ceil( day_diff / 31 ) + " months ago" ||
151 | Math.ceil( day_diff / 365 ) + " years ago";
152 | };
153 |
154 | app({
155 | showPath : function(funcname, docid) {
156 | // I wish this was shared with path.js...
157 | return '/'+[dbname, '_design', dname, '_show', funcname, docid].join('/')
158 | },
159 | listPath : function(funcname, viewname) {
160 | return '/'+[dbname, '_design', dname, '_list', funcname, viewname].join('/')
161 | },
162 | slugifyString : function(string) {
163 | return string.replace(/\W/g,'-').
164 | replace(/\-*$/,'').replace(/^\-*/,'').
165 | replace(/\-{2,}/,'-');
166 | },
167 | attemptLogin : function(win, fail) {
168 | // depends on nasty hack in blog validation function
169 | db.saveDoc({"author":"_self"}, { error: function(s, e, r) {
170 | var namep = r.split(':');
171 | if (namep[0] == '_self') {
172 | login = namep.pop();
173 | $.cookies.set("login", login, '/'+dbname)
174 | win && win(login);
175 | } else {
176 | $.cookies.set("login", "", '/'+dbname)
177 | fail && fail(s, e, r);
178 | }
179 | }});
180 | },
181 | loggedInNow : function(loggedIn, loggedOut) {
182 | login = login || $.cookies.get("login");
183 | if (login) {
184 | loggedIn && loggedIn(login);
185 | } else {
186 | loggedOut && loggedOut();
187 | }
188 | },
189 | db : db,
190 | design : design,
191 | view : design.view,
192 | docForm : docForm,
193 | prettyDate : prettyDate
194 | });
195 | });
196 | };
197 |
198 | $.CouchApp = $.CouchApp || init;
199 |
200 | })(jQuery);
201 |
--------------------------------------------------------------------------------
/couchapp/_attachments/js/underscore-0.5.2.js:
--------------------------------------------------------------------------------
1 | (function(){var j=this,n=j._,i=function(a){this._wrapped=a},m=typeof StopIteration!=="undefined"?StopIteration:"__break__",b=j._=function(a){return new i(a)};if(typeof exports!=="undefined")exports._=b;var k=Array.prototype.slice,o=Array.prototype.unshift,p=Object.prototype.toString,q=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;b.VERSION="0.5.2";b.each=function(a,c,d){try{if(a.forEach)a.forEach(c,d);else if(b.isArray(a)||b.isArguments(a))for(var e=0,f=a.length;e=e.computed&&(e={value:f,computed:g})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);var e={computed:Infinity};b.each(a,function(f,g,h){g=c?c.call(d,f,g,h):f;gf?1:0}),"value")};b.sortedIndex=function(a,c,d){d=d||b.identity;for(var e=0,f=a.length;e>1;d(a[g])=0})})};b.zip=function(){for(var a=b.toArray(arguments),c=b.max(b.pluck(a,"length")),d=new Array(c),e=0;e0?f-c:c-f)>=0)return e;e[g++]=f}};b.bind=function(a,c){var d=b.rest(arguments,2);return function(){return a.apply(c||j,d.concat(b.toArray(arguments)))}};b.bindAll=function(a){var c=b.rest(arguments);if(c.length==0)c=b.functions(a);b.each(c,function(d){a[d]=b.bind(a[d],
10 | a)});return a};b.delay=function(a,c){var d=b.rest(arguments,2);return setTimeout(function(){return a.apply(a,d)},c)};b.defer=function(a){return b.delay.apply(b,[a,1].concat(b.rest(arguments)))};b.wrap=function(a,c){return function(){var d=[a].concat(b.toArray(arguments));return c.apply(c,d)}};b.compose=function(){var a=b.toArray(arguments);return function(){for(var c=b.toArray(arguments),d=a.length-1;d>=0;d--)c=[a[d].apply(this,c)];return c[0]}};b.keys=function(a){if(b.isArray(a))return b.range(0,
11 | a.length);var c=[];for(var d in a)q.call(a,d)&&c.push(d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=function(a){return b.select(b.keys(a),function(c){return b.isFunction(a[c])}).sort()};b.extend=function(a,c){for(var d in c)a[d]=c[d];return a};b.clone=function(a){if(b.isArray(a))return a.slice(0);return b.extend({},a)};b.tap=function(a,c){c(a);return a};b.isEqual=function(a,c){if(a===c)return true;var d=typeof a;if(d!=typeof c)return false;if(a==c)return true;if(!a&&c||
12 | a&&!c)return false;if(a.isEqual)return a.isEqual(c);if(b.isDate(a)&&b.isDate(c))return a.getTime()===c.getTime();if(b.isNaN(a)&&b.isNaN(c))return true;if(b.isRegExp(a)&&b.isRegExp(c))return a.source===c.source&&a.global===c.global&&a.ignoreCase===c.ignoreCase&&a.multiline===c.multiline;if(d!=="object")return false;if(a.length&&a.length!==c.length)return false;d=b.keys(a);var e=b.keys(c);if(d.length!=e.length)return false;for(var f in a)if(!b.isEqual(a[f],c[f]))return false;return true};b.isEmpty=
13 | function(a){return b.keys(a).length==0};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=function(a){return a&&a.concat&&a.unshift};b.isArguments=function(a){return a&&b.isNumber(a.length)&&!b.isArray(a)&&!r.call(a,"length")};b.isFunction=function(a){return a&&a.constructor&&a.call&&a.apply};b.isString=function(a){return a===""||a&&a.charCodeAt&&a.substr};b.isNumber=function(a){return p.call(a)==="[object Number]"};b.isDate=function(a){return a&&a.getTimezoneOffset&&a.setUTCFullYear};
14 | b.isRegExp=function(a){return a&&a.test&&a.exec&&(a.ignoreCase||a.ignoreCase===false)};b.isNaN=function(a){return b.isNumber(a)&&isNaN(a)};b.isNull=function(a){return a===null};b.isUndefined=function(a){return typeof a=="undefined"};b.noConflict=function(){j._=n;return this};b.identity=function(a){return a};b.breakLoop=function(){throw m;};var s=0;b.uniqueId=function(a){var c=s++;return a?a+c:c};b.template=function(a,c){a=new Function("obj","var p=[],print=function(){p.push.apply(p,arguments);};with(obj){p.push('"+
15 | a.replace(/[\r\t\n]/g," ").split("<%").join("\t").replace(/((^|%>)[^\t]*)'/g,"$1\r").replace(/\t=(.*?)%>/g,"',$1,'").split("\t").join("');").split("%>").join("p.push('").split("\r").join("\\'")+"');}return p.join('');");return c?a(c):a};b.forEach=b.each;b.foldl=b.inject=b.reduce;b.foldr=b.reduceRight;b.filter=b.select;b.every=b.all;b.some=b.any;b.head=b.first;b.tail=b.rest;b.methods=b.functions;var l=function(a,c){return c?b(a).chain():a};b.each(b.functions(b),function(a){var c=b[a];i.prototype[a]=
16 | function(){o.call(arguments,this._wrapped);return l(c.apply(b,arguments),this._chain)}});b.each(["pop","push","reverse","shift","sort","splice","unshift"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){c.apply(this._wrapped,arguments);return l(this._wrapped,this._chain)}});b.each(["concat","join","slice"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){return l(c.apply(this._wrapped,arguments),this._chain)}});i.prototype.chain=function(){this._chain=true;return this};
17 | i.prototype.value=function(){return this._wrapped}})();
18 |
--------------------------------------------------------------------------------
/couchapp/_attachments/js/jquery.mustache-0.2.1.js:
--------------------------------------------------------------------------------
1 | /*
2 | Shameless port of a shameless port
3 | @defunkt => @janl => @aq
4 |
5 | See http://github.com/defunkt/mustache for more info.
6 | */
7 |
8 | ;(function($) {
9 |
10 | /*
11 | Shameless port of http://github.com/defunkt/mustache
12 | by Jan Lehnardt , Alexander Lang ,
13 | Sebastian Cohnen
14 |
15 | Thanks @defunkt for the awesome code.
16 |
17 | See http://github.com/defunkt/mustache for more info.
18 | */
19 |
20 | var Mustache = function() {
21 | var Renderer = function() {};
22 |
23 | Renderer.prototype = {
24 | otag: "{{",
25 | ctag: "}}",
26 | pragmas: {},
27 |
28 | render: function(template, context, partials) {
29 | // fail fast
30 | if(template.indexOf(this.otag) == -1) {
31 | return template;
32 | }
33 |
34 | template = this.render_pragmas(template);
35 | var html = this.render_section(template, context, partials);
36 | return this.render_tags(html, context, partials);
37 | },
38 |
39 | /*
40 | Looks for %PRAGMAS
41 | */
42 | render_pragmas: function(template) {
43 | // no pragmas
44 | if(template.indexOf(this.otag + "%") == -1) {
45 | return template;
46 | }
47 |
48 | var that = this;
49 | var regex = new RegExp(this.otag + "%(.+)" + this.ctag);
50 | return template.replace(regex, function(match, pragma) {
51 | that.pragmas[pragma] = true;
52 | return "";
53 | // ignore unknown pragmas silently
54 | });
55 | },
56 |
57 | /*
58 | Tries to find a partial in the global scope and render it
59 | */
60 | render_partial: function(name, context, partials) {
61 | if(typeof(context[name]) != "object") {
62 | throw({message: "subcontext for '" + name + "' is not an object"});
63 | }
64 | if(!partials || !partials[name]) {
65 | throw({message: "unknown_partial"});
66 | }
67 | return this.render(partials[name], context[name], partials);
68 | },
69 |
70 | /*
71 | Renders boolean and enumerable sections
72 | */
73 | render_section: function(template, context, partials) {
74 | if(template.indexOf(this.otag + "#") == -1) {
75 | return template;
76 | }
77 | var that = this;
78 | // CSW - Added "+?" so it finds the tightest bound, not the widest
79 | var regex = new RegExp(this.otag + "\\#(.+)" + this.ctag +
80 | "\\s*([\\s\\S]+?)" + this.otag + "\\/\\1" + this.ctag + "\\s*", "mg");
81 |
82 | // for each {{#foo}}{{/foo}} section do...
83 | return template.replace(regex, function(match, name, content) {
84 | var value = that.find(name, context);
85 | if(that.is_array(value)) { // Enumerable, Let's loop!
86 | return that.map(value, function(row) {
87 | return that.render(content, that.merge(context,
88 | that.create_context(row)), partials);
89 | }).join('');
90 | } else if(value) { // boolean section
91 | return that.render(content, context, partials);
92 | } else {
93 | return "";
94 | }
95 | });
96 | },
97 |
98 | /*
99 | Replace {{foo}} and friends with values from our view
100 | */
101 | render_tags: function(template, context, partials) {
102 | var lines = template.split("\n");
103 |
104 | var new_regex = function() {
105 | return new RegExp(that.otag + "(=|!|>|\\{|%)?([^\/#]+?)\\1?" +
106 | that.ctag + "+", "g");
107 | };
108 |
109 | // tit for tat
110 | var that = this;
111 |
112 | var regex = new_regex();
113 | for (var i=0; i < lines.length; i++) {
114 | lines[i] = lines[i].replace(regex, function (match,operator,name) {
115 | switch(operator) {
116 | case "!": // ignore comments
117 | return match;
118 | case "=": // set new delimiters, rebuild the replace regexp
119 | that.set_delimiters(name);
120 | regex = new_regex();
121 | // redo the line in order to get tags with the new delimiters
122 | // on the same line
123 | i--;
124 | return "";
125 | case ">": // render partial
126 | return that.render_partial(name, context, partials);
127 | case "{": // the triple mustache is unescaped
128 | return that.find(name, context);
129 | return "";
130 | default: // escape the value
131 | return that.escape(that.find(name, context));
132 | }
133 | },this);
134 | };
135 | return lines.join("\n");
136 | },
137 |
138 | set_delimiters: function(delimiters) {
139 | var dels = delimiters.split(" ");
140 | this.otag = this.escape_regex(dels[0]);
141 | this.ctag = this.escape_regex(dels[1]);
142 | },
143 |
144 | escape_regex: function(text) {
145 | // thank you Simon Willison
146 | if(!arguments.callee.sRE) {
147 | var specials = [
148 | '/', '.', '*', '+', '?', '|',
149 | '(', ')', '[', ']', '{', '}', '\\'
150 | ];
151 | arguments.callee.sRE = new RegExp(
152 | '(\\' + specials.join('|\\') + ')', 'g'
153 | );
154 | }
155 | return text.replace(arguments.callee.sRE, '\\$1');
156 | },
157 |
158 | /*
159 | find `name` in current `context`. That is find me a value
160 | from the view object
161 | */
162 | find: function(name, context) {
163 | name = this.trim(name);
164 | if(typeof context[name] === "function") {
165 | return context[name].apply(context);
166 | }
167 | if(context[name] !== undefined) {
168 | return context[name];
169 | }
170 | // silently ignore unknown variables
171 | return "";
172 | },
173 |
174 | // Utility methods
175 |
176 | /*
177 | Does away with nasty characters
178 | */
179 | escape: function(s) {
180 | return s.toString().replace(/[&"<>\\]/g, function(s) {
181 | switch(s) {
182 | case "&": return "&";
183 | case "\\": return "\\\\";;
184 | case '"': return '\"';;
185 | case "<": return "<";
186 | case ">": return ">";
187 | default: return s;
188 | }
189 | });
190 | },
191 |
192 | /*
193 | Merges all properties of object `b` into object `a`.
194 | `b.property` overwrites a.property`
195 | */
196 | merge: function(a, b) {
197 | var _new = {};
198 | for(var name in a) {
199 | if(a.hasOwnProperty(name)) {
200 | _new[name] = a[name];
201 | }
202 | };
203 | for(var name in b) {
204 | if(b.hasOwnProperty(name)) {
205 | _new[name] = b[name];
206 | }
207 | };
208 | return _new;
209 | },
210 |
211 | // by @langalex, support for arrays of strings
212 | create_context: function(_context) {
213 | if(this.is_object(_context)) {
214 | return _context;
215 | } else if(this.pragmas["JSTACHE-ENABLE-STRING-ARRAYS"]) {
216 | return {'.': _context};
217 | }
218 | },
219 |
220 | is_object: function(a) {
221 | return a && typeof a == 'object'
222 | },
223 |
224 | /*
225 | Thanks Doug Crockford
226 | JavaScript — The Good Parts lists an alternative that works better with
227 | frames. Frames can suck it, we use the simple version.
228 | */
229 | is_array: function(a) {
230 | return (a &&
231 | typeof a === 'object' &&
232 | a.constructor === Array);
233 | },
234 |
235 | /*
236 | Gets rid of leading and trailing whitespace
237 | */
238 | trim: function(s) {
239 | return s.replace(/^\s*|\s*$/g, '');
240 | },
241 |
242 | /*
243 | Why, why, why? Because IE. Cry, cry cry.
244 | */
245 | map: function(array, fn) {
246 | if (typeof array.map == "function") {
247 | return array.map(fn)
248 | } else {
249 | var r = [];
250 | var l = array.length;
251 | for(i=0;i 0) queryString.push($hash);
216 | queryString.push(chunks.join($separator));
217 |
218 | return queryString.join("");
219 | }
220 | };
221 |
222 | return new queryObject(location.search, location.hash);
223 | };
224 | }(jQuery.query || {}); // Pass in jQuery.query as settings object
225 |
--------------------------------------------------------------------------------