├── README.md ├── .gitignore ├── requirements.txt ├── tests ├── foo.csv ├── elevation_test.json ├── importTagData.json ├── twilight_test.json └── coord_test.json ├── geolight.R ├── Readme.mkd ├── filespec_draft.mkd ├── util.py └── geologger.py /README.md: -------------------------------------------------------------------------------- 1 | pygeologger 2 | =========== 3 | 4 | python scripts to backend geologgerui -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | virtpy 3 | Vagrantfile 4 | .vagrant 5 | rdrafts 6 | *.pyc 7 | .RData 8 | .Rhistory 9 | *.pyc 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | amqplib==1.0.2 2 | anyjson==0.3.3 3 | billiard==2.7.3.12 4 | celery==3.0.5 5 | geojson==1.0.1 6 | kombu==2.3.2 7 | ordereddict==1.1 8 | pymongo==2.1 9 | numpy 10 | pandas 11 | -------------------------------------------------------------------------------- /tests/foo.csv: -------------------------------------------------------------------------------- 1 | threshold,active,type,tFirst,tSecond 2 | 5.5,True,sunset,2011-07-30T06:58:15.000Z,2011-07-30T16:21:30.000Z 3 | 5.5,True,sunrise,2011-07-30T16:21:30.000Z,2011-07-31T06:53:08.181Z 4 | 5.5,True,sunset,2011-07-31T06:53:08.181Z,2011-07-31T16:25:39.230Z 5 | -------------------------------------------------------------------------------- /tests/elevation_test.json: -------------------------------------------------------------------------------- 1 | { 2 | "data": [ 3 | { 4 | "threshold": 5.5, 5 | "active": true, 6 | "tSecond": "2011-07-30T16:21:30.000Z", 7 | "tFirst": "2011-07-30T06:58:15.000Z", 8 | "type": "sunset" 9 | } 10 | ], 11 | "tagname": "Pabu_test", 12 | "release_location": [ 13 | 35.1, 14 | -97.0 15 | ] 16 | } 17 | 
library(GeoLight)

# Parse command-line arguments.
# BUG FIX: R vectors are 1-indexed; the original used options[0] and
# options[1], so options[0] returned an empty vector and the input file
# argument was silently dropped.
options <- commandArgs(trailingOnly = TRUE)
infile <- options[1]
xy <- options[2]

# Light log with `datetime` and `light` columns.
lig <- read.csv(infile, header = T)
trans <- twilightCalc(lig$datetime, lig$light, ask = F)

# Calibration subset: twilights recorded before the tag left the known
# location (cutoff date is study-specific and currently hard-coded).
calib <- subset(trans, as.numeric(trans$tSecond) < as.numeric(strptime("2011-06-25 11:24:30", "%Y-%m-%d %H:%M:%S")))

# Known calibration coordinates (lon, lat).
# TODO: parse these from the `xy` argument instead of hard-coding.
x <- -98.7
y <- 34.77
elev <- getElevation(calib$tFirst, calib$tSecond, calib$type, known.coord = c(x, y))

coord <- coord(trans$tFirst, trans$tSecond, trans$type, degElevation = elev)
head(coord)
import pymongo
import csv
import tempfile
import pandas
import urllib
import urlparse
import os
import datetime
from StringIO import StringIO

TASK_DB = "cybercom_queue"
TASK_COLLECTION = "task_log"

def csv2json(fname, dateformat=None, fromstring=False):
    """ Convert a CSV file (or CSV string) to a list of row dictionaries.

    fname      -- path to a CSV file, or the CSV content itself when
                  fromstring is True.
    dateformat -- optional strptime format; when given, each row's
                  'datetime' field is normalised to ISO-8601.
    """
    if fromstring:
        reader = csv.DictReader(StringIO(fname), dialect="excel")
        return [convertdate(row, dateformat) for row in reader]
    # Context manager closes the handle (it previously leaked).
    with open(fname, 'rU') as csvfile:
        reader = csv.DictReader(csvfile)
        return [convertdate(row, dateformat) for row in reader]

def convertdate(data, dtformat=None):
    """ Normalise data['datetime'] to ISO-8601 using dtformat, if given.

    Returns the (mutated) row dict either way.
    """
    if dtformat:
        data['datetime'] = datetime.datetime.strptime(data['datetime'], dtformat).isoformat()
    return data

def stringsave(instring):
    """ Write a string (or iterable of lines) to a named temp file.

    Returns the temp file's path.  BUG FIX: the handle is now closed, so
    the content is actually flushed to disk before callers read it back.
    """
    outfile = tempfile.NamedTemporaryFile(mode="wb+", delete=False).name
    with open(outfile, 'w') as fout:
        fout.writelines(instring)
    return outfile


def cleanup(files):
    """ Delete each path in files; returns a short confirmation string. """
    for path in files:  # renamed from 'file' to avoid shadowing the builtin
        os.remove(path)
    return "Deleted %s" % files


def dict2csv(data, outfile=None, subkey=None):
    """ Convert regular structured list of dictionaries to CSV
    - If outfile is not specified a temporary file is created and its name returned
    - Subkey will select a subkey of the returned JSON to generate the CSV from:
        Example:
        data = {"data": [ { "date": "2011-15-10T12:00:00Z", "light": "10" } ],
                "location": [ "a", "b" ], "tagname": "PABU"
               }
        subkey = "data"
        dict2csv(data,subkey)
    """
    if subkey:
        data = data[subkey]
    if not outfile:
        outfile = tempfile.NamedTemporaryFile(mode="wb+", delete=False).name
    # DictWriter keys every row by field name, so rows whose dicts iterate
    # in a different order can no longer scramble columns; the context
    # manager closes the handle (it previously leaked).
    fieldnames = data[0].keys()
    with open(outfile, 'wb+') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(data)
    return outfile

def df2csv(data, outfile=None, subkey=None):
    """ Deserialize a JSON representation of an R data frame (converted
    with RJSONIO toJSON) and write it to CSV via pandas.
    """
    if subkey:
        data = data[subkey]
    if not outfile:
        outfile = tempfile.NamedTemporaryFile(mode="wb+", delete=False).name
    pandas.DataFrame(data).to_csv(outfile)
    return outfile

def pandasdf(data):
    """ Convert to verbose python dictionary representation """
    return pandas.DataFrame(data)
def url_fix(s, charset='utf-8'):
    """ Percent-encode unsafe characters in a URL.

    The URL is split first so scheme and netloc are left intact; only the
    path and query string are quoted.
    """
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    path = urllib.quote(path, '/%')
    qs = urllib.quote_plus(qs, ':&=')
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))

def mongoconnect(db, col):
    """ Connect to Mongo and return a collection object.  Assumes
    localhost to force installation of mongos on the host.
    """
    return pymongo.Connection()[db][col]


from celery import task
import subprocess
import csv
import simplejson as json
import pymongo
import tempfile
import datetime, time
import urlparse, urllib
import pandas
import rpy2.robjects as robjects
import os
from util import *
import geojson

# Boilerplate prepended to generated R scripts.
template = """
library(lattice)
library(ggplot2)
library(plyr)
%(version)s
"""

def runR(datain,
         script,
         outformat,
         saveoutput=False,
         savedisplay=False,
         saverdata=False):
    """ Helper function to make running R scripted tasks easier.

    datain      -- input payload (not yet used by the runner)
    script      -- R source code to evaluate
    outformat   -- desired output format (not yet used)
    saveoutput  -- run the script inside a temp working directory
    savedisplay -- capture graphical output to PDF
    saverdata   -- persist the .RData image (only with saveoutput)
    """
    r = robjects.r
    # Logical 'or'/'and' replace the original bitwise '|'/'&', which only
    # worked by accident on bools and would raise on other truthy values.
    if saveoutput or savedisplay:
        tempdir = ""  # TODO: create a real temporary directory
    if saveoutput:
        r('setwd("%s")' % tempdir)
        # Optionally store and persist .RData to disk
    if savedisplay:
        # PDF grabbing - capture PDF output into a sensible location
        r('pdf("%s")' % tempdir)
    r(script)
    # TODO: clean up the temp directory
    if saverdata and saveoutput:
        r('save.image()')
    return

def getTagData(tagname, user_id="guest", db="geologger", col="lightlogs"):
    """ Get light level data for a tag via the mongo REST API.

    Returns the first matching document, or {"error": "Empty result"}
    when nothing matches.
    """
    url = "http://test.cybercommons.org/mongo/db_find/%s/%s/{'spec':{'tagname':'%s','user_id':'%s'}}" % (db, col, tagname, user_id)
    url_get = urllib.urlopen(url_fix(url)).read()
    if url_get == "[]":
        return {"error": "Empty result"}
    else:
        return json.loads(url_get)[0]

@task
def importTagData_manual(uploadloc, tagname, notes, location, dateformat=None, task_id=None, user_id=None):
    """ Import a geologger tag from an uploaded CSV file into MongoDB. """
    data = {
        "tagname": tagname,
        "notes": notes,
        "release_location": location,
        "user_id": user_id,
        "timestamp": "%sZ" % datetime.datetime.now().isoformat(),
        "task_id": task_id
    }
    data['data'] = csv2json(uploadloc, dateformat)
    try:
        c = mongoconnect('geologger', 'lightlogs')
        c.insert(data)
        return url_fix('http://test.cybercommons.org/mongo/db_find/geologger/lightlogs/{"spec":{"tagname":"%s","user_id":"%s"}}' % (tagname, user_id))
    except Exception:  # narrowed from a bare except: (no longer swallows SystemExit)
        return "Error saving to mongodb"

@task
def importTagData(data=None, task_id=None, user_id=None):
    """ A task for importing geologger tag data supplied as a JSON string
    or an already-decoded dict.
    """
    # BUG FIX: isinstance(data, unicode or str) evaluated to
    # isinstance(data, unicode) only; basestring covers both.
    if isinstance(data, basestring):
        datain = json.loads(data)
    else:
        datain = data

    dataout = {"data": datain['data'],
               "tagname": datain['tagname'],
               "notes": datain['notes'],
               "species": datain['species'],
               "timestamp": "%sZ" % datetime.datetime.now().isoformat(),
               "user_id": user_id,
               "task_id": task_id
               }
    try:
        c = mongoconnect('geologger', 'lightlogs')
        c.insert(dataout)
        return url_fix('http://test.cybercommons.org/mongo/db_find/geologger/lightlogs/{"spec":{"tagname":"%s","user_id":"%s"}}' % (dataout['tagname'], dataout['user_id']))
    except Exception:
        return "Error saving to mongo"


@task
def twilightCalc(tagname=None, threshold=None, task_id=None, user_id=None):
    """ Python wrapper for GeoLight twilightCalc() """
    r = robjects.r
    r.library('GeoLight')
    r.library('RJSONIO')
    tagdata = getTagData(tagname, user_id)
    # getTagData signals an empty result with an {"error": ...} dict.
    if "error" in tagdata:
        return "Had a problem finding lightlog data"
    ligdata = dict2csv(tagdata, subkey="data")
    r('lig <- read.csv("%s", header=T)' % ligdata)
    r('trans <- twilightCalc(lig$datetime, lig$light, LightThreshold=%s, ask=F)' % threshold)
    c = mongoconnect('geologger', 'twilights')
    data = {
        "data": json.loads(r('toJSON(trans)')[0]),
        "tagname": tagname,
        "user_id": user_id,
        "threshold": threshold,
        "timestamp": datetime.datetime.now().isoformat(),
        "format": "RJSONIO",
        "task_id": task_id
    }
    c.insert(data)
    cleanup([ligdata])
    return 'http://test.cybercommons.org/mongo/db_find/geologger/twilights/{"spec":{"tagname":"%s","user_id":"%s"}}' % (tagname, user_id)

@task
def twilightInsert(tagname=None, data=None, threshold=None, task_id=None, user_id=None):
    """ Take twilight data from the web interface and store it in the
    twilights collection (format: JSON-list).
    """
    c = mongoconnect('geologger', 'twilights')

    data = {
        "data": json.loads(data),
        "tagname": tagname,
        "user_id": user_id,
        "threshold": threshold,
        "timestamp": datetime.datetime.now().isoformat(),
        "format": "JSON-list",
        "task_id": task_id
    }
    c.save(data)
    return 'http://test.cybercommons.org/mongo/db_find/geologger/twilights/{"spec":{"tagname":"%s","user_id":"%s"}}' % (tagname, user_id)


@task
def deleteTag(tagname=None, user_id=None):
    """ Remove every stored artifact for a tag/user from the lightlogs,
    twilights and coord collections.
    """
    spec = {"tagname": tagname, "user_id": user_id}
    for colname in ("lightlogs", "twilights", "coord"):
        mongoconnect('geologger', colname).remove(spec)

@task
def changeLight(tagname=None, riseprob=None, setprob=None, days=None, task_id=None, user_id=None):
    """ Python wrapper for GeoLight changeLight() """
    r = robjects.r
    r.library('GeoLight')
    r.library('RJSONIO')
    tagdata = getTagData(tagname=tagname, user_id=user_id, col="twilights")
    # BUG FIX: the original tested len() of the temp-file *path* (always
    # long), so a missing twilight record crashed df2csv with a KeyError.
    if "error" in tagdata:
        return "Twilights have not yet been calculated, please compute twilight events and then try again"
    twilight = df2csv(tagdata, subkey="data")
    r('twilight <- read.csv("%s", header=T)' % twilight)
    r('twilight$tFirst <- as.POSIXlt(twilight$tFirst, origin="1970-01-01")')  # Convert to R Datetime
    # BUG FIX: tSecond was previously overwritten with tFirst.
    r('twilight$tSecond <- as.POSIXlt(twilight$tSecond, origin="1970-01-01")')  # Convert to R Datetime
    r('change <- changeLight(twilight$tFirst, twilight$tSecond, twilight$type, rise.prob=%s, set.prob=%s, days=%s,plot=F)' % (riseprob, setprob, days))
    # Hack to get "." out of variable names so json can be stored in MongoDB
    # see: "http://docs.mongodb.org/manual/reference/limits/#Restrictions on Field Names"
    r('names(change)[3] <- "rise_prob"')
    r('names(change)[4] <- "set_prob"')
    r('names(change$setProb)[2] <- "prob_y"')
    r('names(change$riseProb)[2] <- "prob_y"')
    r('names(change$migTable)[5] <- "P_start"')
    r('names(change$migTable)[6] <- "P_end"')
    c = mongoconnect('geologger', 'changelight')
    data = {
        "data": json.loads(r('toJSON(change)')[0]),
        "params": {"riseprob": riseprob, "setprob": setprob, "days": days},
        "user_id": user_id,
        "tagname": tagname,
        "timestamp": datetime.datetime.now().isoformat(),
        "task_id": task_id
    }
    c.insert(data)
    cleanup([twilight])
    return 'http://test.cybercommons.org/mongo/db_find/geologger/changelight/{"spec":{"tagname":"%s","user_id":"%s"}}' % (tagname, user_id)

@task
def distanceFilter(transdata, elevation, distance, task_id=None, user_id=None):
    """ Python wrapper for GeoLight distanceFilter() -- not yet implemented. """
    pass

@task
def coord(data=None, task_id=None, user_id=None):
    """ Python wrapper for GeoLight coord()
    expects data like:
        data = {
            "tagname": "PABU_test",
            "sunelevation": -4.5,
            "computed": True,
            "threshold": 4.5,
            "twilights": [{
                "tFirst": "2011-07-30T15:21:24.000Z",
                "tSecond": "2011-07-31T15:21:24.000Z",
                "type": "sunrise",
                "active": True
            }],
            "calibperiod": ["2011-07-30T15:21:24.000Z", "2011-07-30T15:21:24.000Z"]
        }

    Data can be provided as JSON string or as a python dictionary.
    Stores a GeoJSON FeatureCollection of estimated positions in the
    'coord' collection and returns a query URL for it.
    """
    # BUG FIX: basestring instead of (unicode or str), which only tested unicode.
    if isinstance(data, basestring):
        datain = json.loads(data)
    else:
        datain = data

    datain['user_id'] = user_id
    datain['timestamp'] = datetime.datetime.now().isoformat()
    tagname = datain['tagname']
    sunelevation = datain['sunelevation']
    r = robjects.r
    r.library('GeoLight')
    r.library('RJSONIO')
    # Save input twilights from UI
    t = mongoconnect('geologger', 'twilights')
    t.save(datain)
    # Convert input to csv for reading in R
    twilight = df2csv(datain, subkey="twilights")
    r('twilights <- read.csv("%s", header=T)' % (twilight))
    # Keep only twilights the user left active (CSV stores Python bools as "True")
    r('twilights <- subset(twilights, twilights$active == "True")')
    # Convert sunrise/sunset to the 1/2 coding GeoLight expects
    r('twilights$typecat[twilights$type == "sunrise"] <- 1')
    r('twilights$typecat[twilights$type == "sunset"] <- 2')
    # Convert ISO-8601 datetimes (GMT) to POSIXct
    r('twilights$tFirst <- as.POSIXct(strptime(twilights$tFirst, format="%Y-%m-%dT%H:%M:%OSZ", tz="GMT"))')
    r('twilights$tSecond <- as.POSIXct(strptime(twilights$tSecond, format="%Y-%m-%dT%H:%M:%OSZ", tz="GMT"))')
    r('coord <- coord(twilights$tFirst, twilights$tSecond, twilights$typecat, degElevation = %s)' % sunelevation)
    r('coord <- as.data.frame(cbind(as.data.frame(coord), twilights$tFirst, twilights$tSecond))')
    r('names(coord) <- c("x","y","tFirst","tSecond")')
    # Drop positions GeoLight could not solve (NA lat or lon)
    r('coord <- subset(coord, !is.na(y) & !is.na(x))')
    r('coord$tFirst <- as.character(strftime(coord$tFirst, "%Y-%m-%dT%H:%M:%SZ"))')
    r('coord$tSecond <- as.character(strftime(coord$tSecond, "%Y-%m-%dT%H:%M:%SZ"))')
    d = mongoconnect('geologger', 'debug')
    c = mongoconnect('geologger', 'coord')

    # Re-shape the RJSONIO column-wise output into a list of row dicts.
    df = pandasdf(json.loads(r('toJSON(coord)')[0]))
    track = [
        dict([
            (colname, row[i])
            for i, colname in enumerate(df.columns)
        ])
        for row in df.values
    ]

    d.insert({"dataframe": df.to_string(), "fromR": json.loads(r('toJSON(coord)')[0])})

    # Round-trip through geojson.dumps/json.loads to get a plain dict
    # that mongo can store.
    dataout = json.loads(
        geojson.dumps(
            geojson.FeatureCollection([
                geojson.Feature(geometry=geojson.Point(
                    [item['x'], item['y']]), properties={"tFirst": item['tFirst'], "tSecond": item['tSecond']}
                )
                for item in track
            ])
        )
    )
    dataout['properties'] = {
        "sunelevation": sunelevation,
        "tagname": tagname,
        "user_id": user_id,
        "timestamp": datetime.datetime.now().isoformat(),
        "task_id": task_id
    }
    c.insert(dataout)
    cleanup([twilight])
    return 'http://test.cybercommons.org/mongo/db_find/geologger/coord/{"spec":{"tagname":"%s","user_id":"%s"}}' % (tagname, user_id)


@task
def getElevation(data=None, task_id=None, user_id=None):
    """
    Wrapper for GeoLight getElevation
    Expects data like:
        data = {
            "twilights": [
                {
                    "active": true,
                    "tSecond": "2011-07-30T16:21:30.000Z",
                    "tFirst": "2011-07-30T06:58:15.000Z",
                    "type": "sunset"
                }
            ],
            "tagname": "Pabu_test",
            "release_location": [35.1, -97.0],
            "threshold": 5.5
        }

    Returns a dict with the computed sun elevation angle.
    """
    # BUG FIX: basestring instead of (unicode or str), which only tested unicode.
    if isinstance(data, basestring):
        datain = json.loads(data)
    else:
        datain = data

    r = robjects.r
    r.library('GeoLight')
    r.library('RJSONIO')
    # release_location is [lat, lon]; GeoLight's known.coord wants (lon, lat).
    lat, lon = datain['release_location']
    tagname = datain['tagname']
    twjson = dict2csv(datain, subkey="twilights")
    r('twilights <- read.csv("%s", header=T)' % twjson)
    r('twilights$tFirst <- strptime(twilights$tFirst, format="%Y-%m-%dT%H:%M:%OSZ")')
    r('twilights$tSecond <- strptime(twilights$tSecond, format="%Y-%m-%dT%H:%M:%OSZ")')
    r('paste(levels(twilights$type))')
    # NOTE(review): relies on factor levels sorting alphabetically
    # (sunrise -> 1, sunset -> 2) -- confirm against GeoLight's coding.
    r('levels(twilights$type) <- c(1,2)')
    r('twilights <- subset(twilights, twilights$active == "True")')
    r('elev <- getElevation(twilights$tFirst, twilights$tSecond, twilights$type, known.coord=c(%s,%s), plot=F)' % (lon, lat))
    elev = r('elev')
    dataout = {"task_id": task_id, "user_id": user_id, "sunelevation": elev[0], "timestamp": datetime.datetime.now().isoformat(), "tagname": tagname}
    cleanup([twjson])
    return dataout