33 |
34 |
--------------------------------------------------------------------------------
/assets/styles/importer.less:
--------------------------------------------------------------------------------
1 | /**
2 | * importer.less
3 | *
4 | * By default, new Sails projects are configured to compile this file
5 | * from LESS to CSS. Unlike CSS files, LESS files are not compiled and
6 | * included automatically unless they are imported below.
7 | *
8 | * The LESS files imported below are compiled and included in the order
9 | * they are listed. Mixins, variables, etc. should be imported first
10 | * so that they can be accessed by subsequent LESS stylesheets.
11 | *
12 | * (Just like the rest of the asset pipeline bundled in Sails, you can
13 | * always omit, customize, or replace this behavior with SASS, SCSS,
14 | * or any other Grunt tasks you like.)
15 | */
16 |
17 |
18 |
19 | // For example:
20 | //
21 | // @import 'variables/colors.less';
22 | // @import 'mixins/foo.less';
23 | // @import 'mixins/bar.less';
24 | // @import 'mixins/baz.less';
25 | //
26 | // @import 'styleguide.less';
27 | // @import 'pages/login.less';
28 | // @import 'pages/signup.less';
29 | //
30 | // etc.
31 |
--------------------------------------------------------------------------------
/edX-datascrub/src/checkData/getCertsFromId.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
'''
A simple interactive program to compare the ids in a file with those in a
certificates file

This program will prompt the user for the name of a csv file containing
only user ids, and a csv of a certificates file, and see if there are any
ids in the first file that correspond to entries in the certificates file.

'''

import csv
import sys
import certificates
import utils

# File names may be supplied on the command line; otherwise prompt.
if len(sys.argv) > 2:
    f1name = sys.argv[1]
    f2name = sys.argv[2]
else:
    f1name = utils.getFileName('Enter csv file with ids : ')
    f2name = utils.getFileName('Enter certificates csv file name : ')

f1 = csv.reader(open(f1name, 'r'))
f2 = csv.reader(open(f2name, 'r'))
certdict = certificates.builddict(f2)

# Skip the header row of the ids file.  BUG FIX: the original called
# f1.readrow(), which is not a csv.reader method and raised
# AttributeError before the comparison loop ever ran; next() is the
# correct way to consume one row.
next(f1)

for [ident] in f1:
    if ident in certdict:
        print('found new identifier ' + ident + ' in certificates file')
33 |
34 |
--------------------------------------------------------------------------------
/edX-datascrub/src/checkData/certsAndusers.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

'''
Looks for differences between the user listed in the users file and those in the certificates file

In particular, looks for any users not found in the users file who have received a certificate.
'''
import csv
import certificates
import user

# Build the lookup dictionaries for the two input files.
udict = user.builddict(csv.reader(open('users.csv', 'r')))
cDict = certificates.builddict(csv.reader(open('certificates.csv', 'r')))

# Every downloadable certificate whose holder has no user record.
certsMissing = [c for c in cDict
                if (cDict[c].status == 'downloadable') and (c not in udict)]

# Report only when something is actually missing.
if len(certsMissing) > 0:
    print('found ' + str(len(certsMissing)) + ' certificates with no associated user')
    outfile = csv.writer(open('certsAndusers.csv', 'w'))
    outfile.writerow(['Missing user ids that have certificates'])
    for u in certsMissing:
        outfile.writerow([u])
29 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
/api/controllers/NoticeController.js:
--------------------------------------------------------------------------------
1 | /**
2 | * NoticeController
3 | *
4 | * @description :: Server-side logic for managing notices
5 | * @help :: See http://links.sailsjs.org/docs/controllers
6 | */
7 |
8 | module.exports = {
9 | create: function(req, res) {
10 | Notice.create(req.params.all(), function(err, notice) {
11 | if (err || !notice) {
12 | sails.log.error(err);
13 | FlashService.error(req, "Unable to create notice.");
14 | return res.redirect('/admin/manage_notices');
15 | }
16 |
17 | FlashService.success(req, 'Successfully created a notice.');
18 | return res.redirect('/admin/manage_notices');
19 | });
20 | },
21 | destroy: function(req, res) {
22 | Notice.destroy(req.param('id'), function(err) {
23 | if (err) {
24 | FlashService.error(req, "Unable to destroy notice.");
25 | } else {
26 | FlashService.success(req, "Successfully destroyed notice.");
27 | }
28 | return res.redirect('/admin/manage_notices');
29 | });
30 | }
31 | };
32 |
33 |
--------------------------------------------------------------------------------
/edX-datascrub/src/findBrowserEventTypes.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

'''Find all of the event types in a JSON-style log dump

This script will take as input an edX activity log file and find all
of the event types in that file. The resulting event types will be
written to a file, one type per line, and the number of event types
(which may be very large) will be displayed at the end of the
run. The script will deal with non-ascii characters, writing the
output file as a latin-1 encoding

'''
import json
import sys
import codecs

typelist = set()       # distinct browser event types seen so far
i = 0                  # count of distinct browser event types written

# FIX: use context managers so both files are closed even when a line
# fails to parse; the original never closed either file.
with open(sys.argv[1], 'r') as infile:
    with codecs.open(sys.argv[2], 'w', 'latin-1', 'replace') as outfile:
        for line in infile:
            elems = json.loads(line)
            etype = elems['event_type']
            # Only browser-sourced events are of interest.
            if elems['event_source'] == 'browser':
                if etype not in typelist:
                    typelist.add(etype)
                    outfile.write(etype + '\n')
                    i = i + 1

print(i)
32 |
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/edX-datascrub/src/logs/distillUnknownLabels.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Created on Oct 26, 2013
4 | Goes through all of the files containing log entries that were not able
5 | to be classified by course, and creates a list of the event types and
6 | the counts for that event type. Will print out the event types in increasing
7 | order so that the most common will be at the end.
8 | @author: waldo
9 | '''
10 | import json
11 | import glob
12 |
def buildList(fname, rdict):
    '''Tally the event types found in one unknown-course log file.

    Reads fname, a file containing one JSON object per line, and
    increments rdict[event_type] for every entry.  Returns rdict (the
    caller's dictionary is also mutated in place, matching the original
    behavior).
    '''
    with open(fname, 'r') as fin:
        for line in fin:
            entry = json.loads(line)
            etype = entry['event_type']
            # dict.get collapses the original add-or-increment branch.
            rdict[etype] = rdict.get(etype, 0) + 1
    return rdict
23 |
if __name__ == '__main__':
    # Accumulate event-type counts across every unknown-course log file.
    counts = {}
    for logname in glob.glob('*/unknown*.log'):
        counts = buildList(logname, counts)

    # Sort ascending by (count, event type) so the most common types
    # appear last in the output.
    for pair in sorted(counts.items(), key=lambda kv: (kv[1], kv[0])):
        print(pair)
33 |
34 |
35 |
--------------------------------------------------------------------------------
/config/bootstrap.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Bootstrap
3 | * (sails.config.bootstrap)
4 | *
5 | * An asynchronous bootstrap function that runs before your Sails app gets lifted.
6 | * This gives you an opportunity to set up your data model, run jobs, or perform some special logic.
7 | *
8 | * For more information on bootstrapping your app, check out:
9 | * http://sailsjs.org/#/documentation/reference/sails.config/sails.config.bootstrap.html
10 | */
11 |
module.exports.bootstrap = function(cb) {

  // Route sails logging through LoggerService while running the test
  // suite (per the original note, this prevents logging in 'test').
  var isTestEnv = (process.env.NODE_ENV == 'test');
  if (isTestEnv) {
    sails.log.debug = LoggerService.debug;
    sails.log.info = LoggerService.info;
    sails.log.error = LoggerService.error;
  }

  // Startup banner.
  sails.log.info('moocRP: Learning Analytics Platform');

  // Start processing the background jobs queue.
  QueueService.launchQueue();

  // Signal sails that bootstrapping finished — the server never lifts
  // until this callback fires.
  cb();
};
32 |
--------------------------------------------------------------------------------
/config/ssl/server.csr:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE REQUEST-----
2 | MIICyDCCAbACAQAwgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UE
3 | BwwIQmVya2VsZXkxDzANBgNVBAoMBm1vb2NSUDEPMA0GA1UECwwGbW9vY1JQMQ8w
4 | DQYDVQQDDAZtb29jUlAxIDAeBgkqhkiG9w0BCQEWEWtrYW9AYmVya2VsZXkuZWR1
5 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuIxAdZjcW4Tx5Z91PPaC
6 | ZqxKHdsMWQ9NV12+3V+j/lMxXGzvRaMuo7zaL7eHsO9fgG4n0lkxS2YrSdMaO2Rt
7 | eTglmHoTmEGTmIF+WjvOnGxvsgeAcA3qwVfImw/8mL337tqmoS1TW1cMo+Wqa2y/
8 | ERuu7hrgAmRpcUIk879iJKEZZPe5+AM8agKBrmzWOkbtqYw8Aj6/Ftbl/iJmjxD7
9 | tKwAeDLbDUoglSI6lCmcGDpHIVQNLmiWFM9WsxdST3iS7IYZP49X055mm7mfEFgK
10 | NaxLyA+89nKSXoPKRmSM2bbqyB8yERh93RZbCX6iupJtvgNzZ/DLCQ6AonLnHKwR
11 | BwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAFcaWSODL1drRmQyAuIrOfYDqVK9
12 | gAnSf8noLY2XSLQ1rlAD+0jiBXooLqTCSGrRiLHmvSWY4dnGG/UBXBnjdABpneBc
13 | IxSnncyHiF2iuTHSkUhhC490qkqA8v7BYBlisGVc6JYyq0smDiaBu1dLXnarwK90
14 | 3TizOUIX+SGYXKroLn5HoMAubbj8QSuV1el54ir/k3oCqg5yCC6WAAAe9paim4Aa
15 | 9UQGi4UTN7EEHRnsczfcDWj2fVQ6S2Ex9+34i1MBpGdVV46dVe7TrXMAX+M44+My
16 | H1FEI/L8dO9wrzLpyJr5y0fQs6G5zUNIsEc3/TcgdeN+2XOOeTDbt5TeBlg=
17 | -----END CERTIFICATE REQUEST-----
18 |
--------------------------------------------------------------------------------
/edX-datascrub/src/logs/selectLogRange.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | Created on Dec 22, 2013
5 |
6 | @author: waldo
7 | '''
8 |
9 | import shutil
10 | import sys
11 | import glob
12 |
def getFiles(srcDir, fromDate, toDate):
    """Return the .log files under srcDir whose date prefix is in range.

    Candidates match srcDir/*/*.log.  Each file name is expected to
    begin with a date stamp terminated by '_'; a file is kept when
    fromDate <= stamp <= toDate under plain string comparison.
    """
    selected = []
    for path in glob.glob(srcDir + '/*/*.log'):
        base = path[path.rfind('/') + 1:]
        stamp = base[:base.find('_')]
        if fromDate <= stamp <= toDate:
            selected.append(path)
    return selected
22 |
if __name__ == '__main__':
    if len(sys.argv) < 4:
        print("usage: selectLogRange srcDir destDir fromDate [toDate]")
        sys.exit(1)

    # Echo each argument as it is parsed, matching the original output.
    srcDir = sys.argv[1]
    print(srcDir)
    destDir = sys.argv[2]
    print(destDir)
    fromDate = sys.argv[3]
    print(fromDate)
    # An omitted toDate defaults to a stamp later than any real log date.
    toDate = sys.argv[4] if len(sys.argv) > 4 else '2020'
    print(toDate)
    copyList = getFiles(srcDir, fromDate, toDate)
    print(copyList)
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/views/home/about.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 |
About
4 |
5 |
6 |
7 |
8 |
9 |
About moocRP
10 |
11 |
moocRP is currently being developed as part of a research project at the University of California, Berkeley. moocRP aims to be a platform where research conducted on large amounts of MOOC data can be shared, through a revolutionary way of sharing data analytics and visualizations.
12 |
moocRP serves as a platform to bring the educational research community together and further the potential of research discoveries through the sharing of research results, tackling the problem of "reproducible research."
32 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/documentation/data_models.md:
--------------------------------------------------------------------------------
1 | [Back to README](../README.md)
2 |
3 | Data Models
4 | ================
5 | This page is a work in-progress. You can contribute to it by forking this repository and making pull requests.
6 |
7 | ## Raw edX Event Tracking
8 |
9 | ## Raw edX-Database
10 |
11 | ## X-API (HarvardX)
12 | The schema of the HarvardX data model is:
13 | time,secs_to_next,actor,verb,object_name,object_type,result,meta,ip,event,event_type,page,agent
14 |
15 | The data is stored in CSV format, so one can use the ```d3.csv``` function in d3.js to read the data into a visualization.
16 |
17 | * _time_: the time of the action
18 | * _secs_to_next_: the number of seconds to the next actor
19 | * _actor_: who is performing the action
20 | * _verb_: the action being performed
21 | * _object_name_: the object that the action is being performed on
22 | * _result_:
23 | * _meta_:
24 | * _ip_:
25 | * _event_:
26 | * _event_type_:
27 | * _page_:
28 | * _agent_:
29 |
30 | _Sample Entry_:
31 | ```
32 | 2014-01-23T07:48:43.123460+00:00,5.462487,applevo,page_view,Course Info,tab_name,,,1.2.3.4,"{""POST"": {}, ""GET"": {}}",/courses/University/Course/info,,"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
33 | ```
34 |
35 | The filename to retrieve from the data dictionary for this data model is _course_name_.csv.
36 |
37 | ## MOOCdb
38 |
39 | ## Stanford
40 |
41 | ## Placeholder
42 |
--------------------------------------------------------------------------------
/edX-datascrub/src/logs/cleanLogDups.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Reads an edX supplied log file, finding and eliminating any duplicate lines
4 |
5 | Reads an edX log file, finding any duplicate lines. Writes a file with the same
6 | name as the log file with the additional postfix "scrub" that contains only the
7 | non-duplicated lines.
8 |
9 | At the end of the run, prints out the number of lines read, the number of duplicate
10 | lines, and the number of non-duplicate lines.
11 | '''
12 | import json
13 | import sys
14 |
class Line(object):
    '''A single log line paired with the line number it came from.'''

    def __init__(self, lineNo, lineCont):
        # lineNo: 1-based position of the line in the input file.
        self.line = lineNo
        # lineCont: the raw text of the line, newline included.
        self.content = lineCont

    def __repr__(self):
        # Handy when debugging key collisions in the dedup dictionary.
        return 'Line(%r, %r)' % (self.line, self.content)
19 |
20 |
linedict = {}

ln = 0         # total lines read (stray `;` statement terminators removed)
duplines = 0   # exact duplicates dropped

# Scan the log, writing each line the first time its (time, username)
# key is seen.  A repeated key with identical content is a duplicate and
# is dropped; a repeated key with different content is treated as a
# genuinely distinct event and kept.
# FIX: context managers guarantee both files are closed; the original
# left them open.  json.loads replaces the manual JSONDecoder instance.
with open(sys.argv[1], 'r') as f1:
    with open(sys.argv[1] + 'scrub', 'w') as f2:
        for line in f1:
            ln += 1
            dl = json.loads(line)
            key = dl['time'] + dl['username']
            if key not in linedict:
                f2.write(line)
                linedict[key] = Line(ln, line)
            elif linedict[key].content == line:
                duplines += 1
                print(line)
            else:
                f2.write(line)

print('total number of lines = ' + str(ln))
print('total number of duplicate lines = ' + str(duplines))
print('total lines of real data = ' + str(ln - duplines))
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/api/responses/ok.js:
--------------------------------------------------------------------------------
1 | /**
2 | * 200 (OK) Response
3 | *
4 | * Usage:
5 | * return res.ok();
6 | * return res.ok(data);
7 | * return res.ok(data, 'auth/login');
8 | *
9 | * @param {Object} data
10 | * @param {String|Object} options
11 | * - pass string to render specified view
12 | */
13 |
14 | module.exports = function sendOK (data, options) {
15 |
16 | // Get access to `req`, `res`, & `sails`
17 | var req = this.req;
18 | var res = this.res;
19 | var sails = req._sails;
20 |
21 | sails.log.silly('res.ok() :: Sending 200 ("OK") response');
22 |
23 | // Set status code
24 | res.status(200);
25 |
26 | // If appropriate, serve data as JSON(P)
27 | if (req.wantsJSON) {
28 | return res.jsonx(data);
29 | }
30 |
31 | // If second argument is a string, we take that to mean it refers to a view.
32 | // If it was omitted, use an empty object (`{}`)
33 | options = (typeof options === 'string') ? { view: options } : options || {};
34 |
35 | // If a view was provided in options, serve it.
36 | // Otherwise try to guess an appropriate view, or if that doesn't
37 | // work, just send JSON.
38 | if (options.view) {
39 | return res.view(options.view, { data: data });
40 | }
41 |
42 | // If no second argument provided, try to serve the implied view,
43 | // but fall back to sending JSON(P) if no view can be inferred.
44 | else return res.guessView({ data: data }, function couldNotGuessView () {
45 | return res.jsonx(data);
46 | });
47 |
48 | };
49 |
--------------------------------------------------------------------------------
/tasks/config/jst.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Precompiles Underscore templates to a `.jst` file.
3 | *
4 | * ---------------------------------------------------------------
5 | *
6 | * (i.e. basically it takes HTML files and turns them into tiny little
7 | * javascript functions that you pass data to and return HTML. This can
8 | * speed up template rendering on the client, and reduce bandwidth usage.)
9 | *
10 | * For usage docs see:
11 | * https://github.com/gruntjs/grunt-contrib-jst
12 | *
13 | */
14 |
15 | module.exports = function(grunt) {
16 |
17 | var templateFilesToInject = [
18 | 'templates/**/*.html'
19 | ];
20 |
21 | grunt.config.set('jst', {
22 | dev: {
23 |
24 | // To use other sorts of templates, specify a regexp like the example below:
25 | // options: {
26 | // templateSettings: {
27 | // interpolate: /\{\{(.+?)\}\}/g
28 | // }
29 | // },
30 |
31 | // Note that the interpolate setting above is simply an example of overwriting lodash's
32 | // default interpolation. If you want to parse templates with the default _.template behavior
33 | // (i.e. using ), there's no need to overwrite `templateSettings.interpolate`.
34 |
35 |
36 | files: {
37 | // e.g.
38 | // 'relative/path/from/gruntfile/to/compiled/template/destination' : ['relative/path/to/sourcefiles/**/*.html']
39 | '.tmp/public/jst.js': require('../pipeline').templateFilesToInject
40 | }
41 | }
42 | });
43 |
44 | grunt.loadNpmTasks('grunt-contrib-jst');
45 | };
46 |
--------------------------------------------------------------------------------
/edX-datascrub/src/checkData/compUser.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
'''
A simple script to compare two user files and a certificates file

Generates a report of users differences between multiple user files,
with the additional check to see if those users that are in one of the
files but not the other are in the certificates files. This script assumes
that the certificates file being used is in the directory in which the
script is run, and takes as arguments the file names of the two user
files to be compared.
'''

import user
import csv
import sys
import certificates

def writeDiffs(fromDict, toDict, certDict, out):
    '''Write ids in fromDict missing from toDict, flagging certificate holders.'''
    for key in fromDict.iterkeys():
        # NOTE(review): membership in the other user dictionary is tested
        # with fromDict[key].id, but membership in the certificates
        # dictionary with key itself.  This mirrors the original code —
        # confirm both dictionaries really are keyed the same way.
        if fromDict[key].id not in toDict:
            if key in certDict:
                out.writerow([key, 'Yes'])
            else:
                out.writerow([key, 'No'])

f1 = csv.reader(open(sys.argv[1], 'r'))
f2 = csv.reader(open(sys.argv[2], 'r'))
f3 = csv.writer(open('additions.csv', 'w'))
f4 = csv.reader(open('certificates.csv', 'r'))
f3.writerow(['id', 'in certificate file'])
f3.writerow(['User ids in first file, not in second'])
u1 = user.builddict(f1)
u2 = user.builddict(f2)
cdict = certificates.builddict(f4)

# The two report sections were duplicated loops; both now share writeDiffs.
writeDiffs(u1, u2, cdict, f3)

f3.writerow(['User ids in second file, not in first'])

writeDiffs(u2, u1, cdict, f3)
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/api/models/Request.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Request
3 | *
4 | * @module :: Model
5 | * @description :: A short summary of how this model works and what it represents.
6 | * @docs :: http://sailsjs.org/#!documentation/models
7 | */
8 |
9 | module.exports = {
10 |
11 | attributes: {
12 |
13 | /* e.g.
14 | nickname: 'string'
15 | */
16 | firstName: {
17 | type: 'STRING',
18 | required: true
19 | },
20 | lastName: {
21 | type: 'STRING',
22 | required: true
23 | },
24 | email: {
25 | type: 'STRING',
26 | email: true,
27 | required: true
28 | },
29 | reason: {
30 | type: 'TEXT',
31 | },
32 | requestingUser: {
33 | model: 'user',
34 | required: true
35 | },
36 | dataModel: {
37 | model: 'datamodel',
38 | required: true
39 | },
40 | dataset: {
41 | type: 'STRING',
42 | required: true
43 | },
44 | requestType: {
45 | type: 'STRING',
46 | required: true
47 | },
48 | approved: {
49 | type: 'BOOLEAN',
50 | defaultsTo: false
51 | },
52 | rejected: {
53 | type: 'BOOLEAN',
54 | defaultsTo: false
55 | },
56 | downloaded: {
57 | type: 'BOOLEAN',
58 | defaultsTo: false
59 | }
60 | },
61 | beforeCreate: function (values, next) {
62 | // Default reason value
63 | if (values.reason == '') {
64 | values.reason = 'No reason specified';
65 | }
66 | return next();
67 | }
68 |
69 | };
70 |
--------------------------------------------------------------------------------
/api/models/Analytic.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Analytic
3 | *
4 | * @module :: Model
5 | * @description :: A short summary of how this model works and what it represents.
6 | * @docs :: http://sailsjs.org/#!documentation/models
7 | */
8 |
9 | module.exports = {
10 |
11 | attributes: {
12 |
13 | /* e.g.
14 | nickname: 'string'
15 | */
16 | owner: {
17 | model: 'user',
18 | required: true
19 | },
20 | name: {
21 | type: 'STRING',
22 | required: true
23 | },
24 | fileName: {
25 | type: 'STRING',
26 | required: true
27 | },
28 | seededFileName: {
29 | type: 'STRING',
30 | required: true
31 | },
32 | description: {
33 | type: 'TEXT',
34 | required: true
35 | },
36 | url: {
37 | type: 'STRING'
38 | },
39 | approved: {
40 | type: 'BOOLEAN',
41 | defaultsTo: false
42 | },
43 | rejected: {
44 | type: 'BOOLEAN',
45 | defaultsTo: false
46 | },
47 | // i.e. D3, R, Plateau
48 | type: {
49 | type: 'STRING',
50 | required: true
51 | },
52 | // i.e. HarvardX, StanfordX, moocDB
53 | dataModels: {
54 | type: 'ARRAY',
55 | required: true
56 | },
57 | usersWhoStarred: {
58 | collection: 'user',
59 | via: 'starredAnalytics'
60 | },
61 |
62 | // Instance methods
63 | toJSON: function() {
64 | var obj = this.toObject();
65 | delete obj._csrf;
66 | return obj;
67 | },
68 | }
69 | };
70 |
--------------------------------------------------------------------------------
/edX-datascrub/src/usersAndClasses.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import glob
5 | import user
6 | import csv
7 | import ipGeoloc as loc
8 | import sys
9 |
class userClasses:
    '''Per-user record of the classes a user id has appeared in.'''

    def __init__(self, uname, className):
        # A record is created on first sighting, so it starts with
        # exactly one class: the one that triggered creation.
        self.numClasses = 1
        self.uname = uname
        self.country = ''
        self.classList = [className]
16 |
17 |
geoFile = csv.reader(open(sys.argv[1], 'r'))
geoDict = loc.builddict(geoFile)

dirList = glob.glob('[A-Z]*')
classDict = {}

# Each class directory contributes one users.csv; fold every user id
# into classDict, tracking how many classes each id appears in.
for d in dirList:
    # FIX: `with` guarantees the per-class file is closed even if a row
    # fails to parse (the original only closed it on the happy path).
    with open(d + '/users.csv', 'r') as filein:
        fin = csv.reader(filein)
        cName = d
        next(fin)  # skip the header row (portable form of fin.next())
        udict = user.builddict(fin)
        for u in iter(udict):
            if u in classDict:
                classDict[u].numClasses += 1
                classDict[u].classList.append(cName)
                # Same id with a different username across classes is
                # flagged rather than silently overwritten.
                if udict[u].username != classDict[u].uname:
                    classDict[u].uname = 'Duplicate user name'
            else:
                classDict[u] = userClasses(udict[u].username, cName)
                if udict[u].username in geoDict:
                    classDict[u].country = geoDict[udict[u].username]

outf = csv.writer(open('studentClassList.csv', 'w'))
outf.writerow(['user Id', 'User name','country', 'number of classes', 'classes'])
for u in iter(classDict):
    outf.writerow([u, classDict[u].uname, classDict[u].country, classDict[u].numClasses, classDict[u].classList])
47 |
--------------------------------------------------------------------------------
/edX-datascrub/src/checkData/corrUsers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Checks to insure that users appear in the enrollment and profiles file
4 |
5 | Looks at the user, enrollment, and profile file in the directory in which
6 | the script is run to insure that all of the entries in the user file
7 | have entries in the enrollment and profiles file, and that fall of the
8 | entries in the profiles and enrollment file have entries in the user file.
9 | '''
10 |
11 |
12 | import csv
13 | import user
14 | import demographics.userprofile as uprofile
15 | import course_enrollment as ce
16 |
# Profile rows can be very large; raise the csv field cap accordingly.
csv.field_size_limit(1000000)
uIn = csv.reader(open('users.csv', 'r'))
uDict = user.builddict(uIn)

upIn = csv.reader(open('profiles.csv', 'r'))
upDict = uprofile.builddict(upIn)

ceIn = csv.reader(open('enrollment.csv', 'r'))
ceDict = ce.builddict(ceIn)

of = csv.writer(open('userDiffs.csv', 'w'))

of.writerow(['ids in user file, not in profiles file'])
for u in iter(uDict):
    if u not in upDict:
        of.writerow([u])

of.writerow(['ids in profiles file, not in user file'])
for p in iter(upDict):
    if p not in uDict:
        of.writerow([p])

of.writerow(['ids in user file, not in enrollment file'])
for u in iter(uDict):
    if u not in ceDict:
        of.writerow([u])

of.writerow(['ids in enrollment file, not in user file'])
for e in iter(ceDict):
    if e not in uDict:
        # BUG FIX: the original wrote the stale loop variable `u` here,
        # so this section repeated an id from the previous loop instead
        # of the enrollment id actually missing from the user file.
        of.writerow([e])
48 |
49 |
50 |
--------------------------------------------------------------------------------
/views/home/license.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 |
License
4 |
5 |
6 |
7 |
8 |
MIT License
9 |
10 |
11 |
12 |
13 | The MIT License (MIT)
14 |
15 |
16 | Copyright (c) 2014-2015 moocRP.
17 |
18 |
19 | Permission is hereby granted, free of charge, to any person obtaining a copy
20 | of this software and associated documentation files (the "Software"), to deal
21 | in the Software without restriction, including without limitation the rights
22 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
23 | copies of the Software, and to permit persons to whom the Software is
24 | furnished to do so, subject to the following conditions:
25 |
26 |
27 | The above copyright notice and this permission notice shall be included in
28 | all copies or substantial portions of the Software.
29 |
30 |
31 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
34 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
35 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
36 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
37 | THE SOFTWARE.
38 |
39 |
--------------------------------------------------------------------------------
/api/services/AuthService.js:
--------------------------------------------------------------------------------
1 | // Various CAS settings
2 | var settings = sails.config;
3 | var casOptions = settings.casOptions;
4 |
5 | module.exports = {
6 | loginRoute: function(params) {
7 | // Can override with own login system
8 | var baseURL = casOptions.casURL + casOptions.login + '?service=' + AuthService.serviceURL + '/user/validate';
9 | if (params) {
10 | for (var key in params) {
11 | if (params.hasOwnProperty(key)) baseURL = baseURL + '&' + key + '=' + params[key];
12 | }
13 | }
14 | return baseURL
15 | },
16 |
17 | logoutRoute: function() {
18 | return casOptions.casURL + casOptions.logout + '?url=' + AuthService.serviceURL;
19 | },
20 |
21 | validateRoute: function(params) {
22 | var baseURL = casOptions.casURL + casOptions.validate + '?service=' + AuthService.serviceURL + '/user/validate';
23 | if (params) {
24 | for (var key in params) {
25 | if (params.hasOwnProperty(key)) baseURL = baseURL + '&' + key + '=' + params[key];
26 | }
27 | }
28 | return baseURL
29 | },
30 |
31 | validate: function(url, cb) {
32 | var request = require('request');
33 |
34 | request({uri: url, secureProtocol: 'TLSv1_method' }, function(err, response, body) {
35 | var uid = undefined;
36 |
37 | if (!err && body) {
38 | var lines = body.split('\n');
39 | if (lines && lines[0] == 'yes') {
40 | uid = lines[1];
41 | }
42 | }
43 | return cb(err, uid);
44 | });
45 | },
46 |
47 | serviceURL: settings.protocol + settings.appEnvMap[settings.environment] + ":" + settings.port
48 | }
--------------------------------------------------------------------------------
/test/bootstrap.js:
--------------------------------------------------------------------------------
1 | // http://stackoverflow.com/questions/26837522/sails-js-how-to-actually-run-tests
2 | // https://github.com/albertosouza/sails-test-example
3 | /**
4 | * Test starter - with this version of sails.js we can only start one sails server,
5 | * to solve this problem we use only one before All and after All to start and
6 | * stop the server
7 | */
8 | var Sails = require('sails');
9 | var _ = require('lodash');
10 |
11 | global.DOMAIN = 'http://localhost';
12 | global.PORT = 1337;
13 | global.HOST = DOMAIN + ':' + PORT;
14 |
before(function(callback) {
  // Lifting sails can take several seconds.
  this.timeout(7000);

  var configs = {
    log: {
      level: 'info'
    },
    // Run all models on the in-memory adapter so tests leave no state.
    connections: {
      memory: {
        adapter: 'sails-memory'
      }
    },
    models: {
      connection: 'memory'
    },
    port: PORT,
    environment: 'test',

    // @TODO needs support to csrf token
    csrf: false,

    // These hooks are unnecessary for API tests.
    hooks: {
      grunt: false,
      socket: false,
      pubsub: false
    }
  };

  // Sails.load() might be better, but lift() loads bootstrap.js in config, for
  // removing logging within the app
  Sails.lift(configs, function(err, sails) {
    if (err) {
      console.error(err);
      return callback(err);
    }

    // here you can load fixtures, etc.
    callback(err, sails);
  });
});

after(function(done) {
  // here you can clear fixtures, etc.
  sails.lower(done);
});
--------------------------------------------------------------------------------
/config/log.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Built-in Log Configuration
3 | * (sails.config.log)
4 | *
5 | * Configure the log level for your app, as well as the transport
6 | * (Underneath the covers, Sails uses Winston for logging, which
7 | * allows for some pretty neat custom transports/adapters for log messages)
8 | *
9 | * For more information on the Sails logger, check out:
10 | * http://sailsjs.org/#/documentation/concepts/Logging
11 | */
12 |
13 | var winston = require('winston');
14 |
// Example custom Winston logger writing debug-and-above to a file.
// Unused unless the `custom` option below is uncommented.
var customLogger = new winston.Logger({
  transports: [
    new (winston.transports.File)({
      level: 'debug',
      filename: './logs/development.log'
    })
  ]
});

module.exports.log = {

  /***************************************************************************
  * Valid `level` configs: i.e. the minimum log level to capture with       *
  * sails.log.*()                                                           *
  *                                                                         *
  * The order of precedence for log levels from lowest to highest is:       *
  * silly, verbose, info, debug, warn, error                                *
  *                                                                         *
  * You may also set the level to "silent" to suppress all logs.            *
  ***************************************************************************/

  //colors: false,
  //custom: customLogger

};
42 |
--------------------------------------------------------------------------------
/api/controllers/JobController.js:
--------------------------------------------------------------------------------
1 | /**
2 | * JobController
3 | *
4 | * @description :: Server-side logic for managing jobs
5 | * @help :: See http://links.sailsjs.org/docs/controllers
6 | */
7 |
/**
 * Persist a new start time on the job identified by jobID.
 * Invokes cb(err) on lookup/save failure (err may be undefined when the
 * job simply was not found), or cb() on success.
 */
var updateStartTime = function (jobID, startTime, cb) {
  Job.findOne(jobID, function (err, job) {
    if (err || !job) return cb(err);
    // BUG FIX: removed `params = { status: status };` — `status` is not
    // defined in this function, so that line threw a ReferenceError at
    // runtime (and would otherwise have leaked an implicit global).
    job.startTime = startTime;
    job.save(function (err2) {
      if (err2) return cb(err2);
      return cb();
    });
  });
};
19 |
/**
 * Persist a new end time on the job identified by jobID.
 * Invokes cb(err) on lookup/save failure (err may be undefined when the
 * job simply was not found), or cb() on success.
 */
var updateEndTime = function (jobID, endTime, cb) {
  Job.findOne(jobID, function (err, job) {
    if (err || !job) return cb(err);
    // BUG FIX: removed the stray `W` token that followed
    // `return cb(err2);` — it raised a ReferenceError on every successful
    // save — and the `params = { status: status };` line, which referenced
    // an undefined `status` and leaked an implicit global.
    job.endTime = endTime;
    job.save(function (err2) {
      if (err2) return cb(err2);
      return cb();
    });
  });
};
31 |
/**
 * Persist a new status on the job identified by jobID.
 * Invokes cb(err) on lookup/save failure (err may be undefined when the
 * job simply was not found), or cb() on success.
 */
var updateStatus = function(jobID, status, cb) {
  Job.findOne(jobID, function (err, job) {
    if (err || !job) return cb(err);
    // BUG FIX: removed `params = { status: status };` — the value was never
    // used and, lacking a `var`, it created an implicit global.
    job.status = status;
    job.save(function (err2) {
      if (err2) return cb(err2);
      return cb();
    });
  });
};
43 |
44 | module.exports = {
45 | create: function(req, res) {
46 | var params = req.params.all();
47 | Job.create(params, function (err, job) {
48 | if (err || !job) return res.json( {'success': false });
49 | return res.json({ 'success': true });
50 | });
51 | },
52 | destroy: function(req, res) {
53 | Job.destroy(req.param['id'], function (err, job) {
54 | return res.json({ 'success': true });
55 | });
56 | },
57 | update: function(req, res) {
58 | // TODO
59 | }
60 | };
61 |
62 |
--------------------------------------------------------------------------------
/config/ssl/server.key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEowIBAAKCAQEAuIxAdZjcW4Tx5Z91PPaCZqxKHdsMWQ9NV12+3V+j/lMxXGzv
3 | RaMuo7zaL7eHsO9fgG4n0lkxS2YrSdMaO2RteTglmHoTmEGTmIF+WjvOnGxvsgeA
4 | cA3qwVfImw/8mL337tqmoS1TW1cMo+Wqa2y/ERuu7hrgAmRpcUIk879iJKEZZPe5
5 | +AM8agKBrmzWOkbtqYw8Aj6/Ftbl/iJmjxD7tKwAeDLbDUoglSI6lCmcGDpHIVQN
6 | LmiWFM9WsxdST3iS7IYZP49X055mm7mfEFgKNaxLyA+89nKSXoPKRmSM2bbqyB8y
7 | ERh93RZbCX6iupJtvgNzZ/DLCQ6AonLnHKwRBwIDAQABAoIBACyEYJQ9gIJvKm7q
8 | rTw8dq5Pxz02dt8Q6uY1TfJWvNd/t/uEp59Tws9qofM6wXez9oSjjeWW8GYwyiZv
9 | zvceva2tFpyWbh2fS/xQ2Grp3GgtXDBE0P22zbc/9rs4+wTZZnJuuh1NMrto3zq2
10 | DwsE235EAEmdoAXTtP/GJeXKd+E/tlAK8OtAmnMPeaCDy22+6R7h8oVSlOVu+N1u
11 | 32sC1gla+jmKj2RUVzKlWVz9gmng+UeULB+3R0fLpbKQis7yEXMZCqEORzca3m8V
12 | r9dACNko3AGHbixLZnC/ZIU7p/xvyU6PdEw1n7SsmTXmEjtBOH8Fxeja0tVDe8BV
13 | DTnehwkCgYEA4e9FDk9I4NmUw6KAYl7ZbSwb2/1m656oiDcqdFP3nEI3GA621FHE
14 | hm8HVw9AGLGQXxc66IP9g24qREoLS3YhVSlUMXu8WXq/vz7lHI2b/g24U5pIF7Iv
15 | pPjvsFDhmH9qrjU8kh8camBJtcvT4ct2nptrY2+AsNcdl9AvA3MxhjUCgYEA0RsX
16 | nshDmcztl/XH0wARVO7vz1PMy3xkvRWfqaeCWnC1IycpARlZqUHmbBqvpecWJ/Yw
17 | d2YsUuehNb2e6QNVAAwOj0ygcyjSpVKw7y9bc+ttWg+jFf4il/VgtzfikossQkzz
18 | QfLT93ffKAKOjxoYM7lZbYJP4fhzHp5WIU+sscsCgYAZOnSFkojawrD/32dilKDG
19 | tgQuXm9dpAvBmhddgfrGMgag4xO7RZ4iPMefCw7nMvyiAaAMUqC+SlDh6zqzpG84
20 | aTMDi4OOokxC+KzwsUdX6QRKIZInQzhavYlWMNHgC2pIJZ2r21l672GLsUTpk6Sd
21 | NVGaetrt5DdjulVllzlSeQKBgBcNbpAxscot3m+nR+1KD58WbFel/GjegNibnqt8
22 | bRF9ZWrHsWyOl+Th//4g/wZCMJ4dNQCkwfQt1wburaswk9laeuxvXSz07iwNrrXf
23 | uaxQ4xBPswIEr1mjUpNAVPkk4K86foLhu16H15E4nvDFxq9FGsfI2velhUN13zby
24 | 6q37AoGBANSZ+oEukbYyo0s4EGhK0a85eD9lNjxfRPGfMScyqpqZRERG485AYBYY
25 | zVBLxEiTNKirY4RJgHcD/8KpEUEWQYLIfxCNlQtsRvobKqyj5zcqPsoTLDNm/bI7
26 | dmWzfP3t/86gGoeJax2RhuX+xLQUXLjFMVpXJ3kSlxhn3875w1XG
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/edX-datascrub/src/logs/findLogDups.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | Find and report on duplicate lines in an edX log file
5 |
6 | Reads the edX log file supplied as the first parameter. Building a dictionary
7 | keyed by a concatenation of the username and time of the log entry, compares any
8 | two lines with the same key for equivalence. If they are the same, increments the
9 | count of duplicate lines and prints out the contents of the line, along with the
10 | sequence number of the duplicated line and the sequence number of the line it
11 | duplicates.
12 |
13 | At the end of the run, prints out the number of log lines read, the number of
14 | duplicate lines found, and the number of non-duplicate lines.
15 |
16 | If duplicate lines are found, they can be eliminated by running the script
17 | cleanLogDups.py
18 | '''
19 | import json
20 | import sys
21 |
class Line(object):
    """Record of a log line's first occurrence: its position and raw text."""

    def __init__(self, lineNo, lineCont):
        self.content = lineCont  # raw line text, kept for exact comparison
        self.line = lineNo       # sequence number of the line in the input
26 |
27 |
28 | linedict = {}
29 |
30 | f1 = open(sys.argv[1], 'r')
31 | ln = 0;
32 | duplines = 0;
33 |
34 | dc = json.JSONDecoder()
35 |
36 | for line in f1:
37 | ln += 1;
38 | dl = dc.decode(line)
39 | key = dl['time'] + dl['username']
40 | if key not in linedict:
41 | lo = Line(ln, line)
42 | linedict[key] = lo
43 | else:
44 | if linedict[key].content == line:
45 | duplines += 1
46 | print line
47 | print 'line no ' + str(lo) + 'duplicates line no ' + str(linedict[key].line)
48 |
49 | print 'total number of lines = ' + str(ln)
50 | print 'total number of duplicate lines = ' + str(duplines)
51 | print 'total lines of real data = ' + str(ln - duplines)
52 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
/edX-datascrub/src/convertfiles/makeIdCountryFile.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Takes a file mapping usernames to country and converts to userId to country
4 |
5 | Used for an earlier version of the edX geolocation files, this takes a csv for a particular
6 | course that mapped the username to the country indicated by the most-often used IP address
7 | for the student and creates a file that maps from the userId to the country.
8 |
9 | This is largely obsoleted by the new format for a country mapping.
10 |
11 | Created on May 4, 2013
12 |
13 | @author: waldo
14 | '''
15 |
16 | import user
17 | import csv
18 | import os
19 | import utils
20 | import sys
21 |
22 |
def buildNameCountry(cfile):
    """Build a username -> country mapping from rows of [country, username].

    Later rows for the same username overwrite earlier ones, matching the
    original loop's behavior.
    """
    return {uname: ctry for [ctry, uname] in cfile}
28 |
# Inputs may come from the command line or from interactive prompts.
if len(sys.argv) > 3:
    cFileName = sys.argv[1]
    userFileName = sys.argv[2]
    clName = sys.argv[3]
else:
    cFileName = utils.getFileName('user name to country file')
    userFileName = utils.getFileName('user file')
    clName = raw_input("Please enter the name of the class : ")

# username -> country, from the geolocation csv (rows of [country, username])
cfile = csv.reader(open(cFileName, 'r'))
nameDict = buildNameCountry(cfile)
# user id -> user record; records presumably expose .username — see user module
ufile = csv.reader(open(userFileName, 'r'))
userDict = user.builddict(ufile)


# Output: <class>_id_country.csv with one (id, country) row per located user.
clfName = clName + '_id_country.csv'
outfile = csv.writer(open(clfName, 'w'))

users = userDict.keys()

outfile.writerow(['User id', 'Country'])
# Join the two mappings via the username; report users with no location.
for u in users:
    userName = userDict[u].username
    if (userName in nameDict):
        country = nameDict[userDict[u].username]
        outfile.writerow([u, country])
    else:
        print ('unknown userName ' + userName)
57 |
58 |
59 |
--------------------------------------------------------------------------------
/config/ssl/server.key.secure:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | Proc-Type: 4,ENCRYPTED
3 | DEK-Info: DES-EDE3-CBC,73D65A98A60F9174
4 |
5 | Z0GAgB2erUHdf9RU/FgAIXwz5C7hab950KParYritXMbKUBA2XA4iHJoxElSrfDq
6 | 8BmeNtB6Ha8VTbtfLN1x23xEm5sag59jyYy3pqd9vQFGF5wZIbD1JI9i6n2hl4V5
7 | s86++wRUPbzQkw3JpjSCjt738FeEc05k0X07vvMiQ1i0b6fstDUWhpS05i8nGAEH
8 | nmbqa22uXvYp5s2Jq2E9hfOY3qjIUIFYuDETmUy+WTk4JjkPY/l4lhUJXd4x4ksd
9 | dDKkiRA1zOJ612T3vl4wtD75wXjpJg5TOW35fROGgkgvVnB1jYyD9yNKOcHWS2Xb
10 | 8Eh1YLb9v8SQt405rbVxSW+WP/SaPRZZPUH4GYvgtDXsl6JJj8Rzas61ymQ+c4v0
11 | MvheoOyVSF2+28jqi9vxwsAO/tHA3gYbSn6JCf9fM3j9qPotk+okDrEL2HvGYOKW
12 | nICtUxaeLDtLwmpmCQsZ2Da221J1Q0EoHAZEb0fv/BVLZObe5EWyrrcNQpHNsuOU
13 | F5ilem++4eEYia18W8MttzIRL5/QeWVDZljjo6EyvxaKqG2llX3lANbg1TdX7ABU
14 | D0+8FU5bg10C0sF4FYE6mo2ocqiK0Plt85nrqSoLsCFZorUMc/3ecxLlcyIWxn0L
15 | u7ZRUm9XLZWiEq9HWKSMoCvBJjJG9rvSQeupcV2DxrwZ40f2jl9YBYeEe6uz8K8w
16 | 2HttyX2KTdzqicX1/1wk65AC8NPC6KZyKw6uoLaqf9ldWZh5amTcVFD/QzIIG8Q6
17 | ByBHxpWvPEIOceV4i957qjXA7WrUqpqueJKrLHKMQMPj81ActUTAX862T68I0gqX
18 | DAqmTpTHo3G1VTRZuA/GflAXkicOMc3la05Czic7xxOODjzP8PYo651k/od4GVkf
19 | ZXZpCxVqdwvd4f1ccqwRzf/4Mm4SXdfahOgMu4zXScoCaS+wYlluf4ITVJGIDqYC
20 | yF7/9ktGRZJXatLTe+TcNxNvNqtlhTE3OjfUStG48EZ1fcQqCW0vOBDyGaoBobII
21 | xW2F5Iz+wRG2M9FlRrd4Co228MDDucnRNf0hw2OM4S9QAYAqM5h7huzmkrHyEi4O
22 | 6TNhHmRFzLzD9qoeBv/ZHE0c4DtpRPXvZesBot795atEqjq7gAlMERS5Qh7teMxh
23 | rljnVk80A9MFdCsboORVIPkRaqAvkdUBwfqeigPBndIbuFyaJ5DOSeH+af8lQ1BZ
24 | STM9bVamS2dE6UomA5v9owUVHocVHAKi/IY7pjxVxXFUhv7Hf667gRWd0ZCqr2YW
25 | Rw71Hko8qXO/g++d1lGR8FLMKrdkO/WTlIRGILb3YN8GGa8LgV0GXEVRVWkL4Wyh
26 | 0VwC5ylAXX8xDXxjHrZjlEOJDP1xGFZCQLaFqUowhOL/orDACCj0ogCNAJDrk3OH
27 | OZQx1uVBZqyZ1+M7iIOp5mCPo6EpMx9oJ8YX56acl1Lsyug4itWgDpO+HRJdva2c
28 | m4GqF8qEnog8NyuBpji5NXEDvxfNDiU3/fsighf1FEn3LNlsnvkZ97dpM9pLJUlM
29 | 6fL+IllL1mDGH9mtSiT+u8LgExX9EKGl8puzyP8eKnOxR1oW6yhpHw==
30 | -----END RSA PRIVATE KEY-----
31 |
--------------------------------------------------------------------------------
/app.js:
--------------------------------------------------------------------------------
1 | /**
2 | * app.js
3 | *
4 | * Use `app.js` to run your app without `sails lift`.
5 | * To start the server, run: `node app.js`.
6 | *
7 | * This is handy in situations where the sails CLI is not relevant or useful.
8 | *
9 | * For example:
10 | * => `node app.js`
11 | * => `forever start app.js`
12 | * => `node debug app.js`
13 | * => `modulus deploy`
14 | * => `heroku scale`
15 | *
16 | *
17 | * The same command-line arguments are supported, e.g.:
18 | * `node app.js --silent --port=80 --prod`
19 | */
20 |
21 | // Ensure a "sails" can be located:
(function() {
  // Resolve the sails runtime; a local app install is preferred.
  var sails;
  try {
    sails = require('sails');
  } catch (e) {
    console.error('To run an app using `node app.js`, you usually need to have a version of `sails` installed in the same directory as your app.');
    console.error('To do that, run `npm install sails`');
    console.error('');
    console.error('Alternatively, if you have sails installed globally (i.e. you did `npm install -g sails`), you can use `sails lift`.');
    console.error('When you run `sails lift`, your app will still use a local `./node_modules/sails` dependency if it exists,');
    console.error('but if it doesn\'t, the app will run with the global sails instead!');
    return;
  }

  // Try to get `rc` dependency
  // (`rc` merges .sailsrc files and CLI flags into the runtime config.)
  var rc;
  try {
    rc = require('rc');
  } catch (e0) {
    try {
      // Fall back to the copy bundled inside sails itself.
      rc = require('sails/node_modules/rc');
    } catch (e1) {
      console.error('Could not find dependency: `rc`.');
      console.error('Your `.sailsrc` file(s) will be ignored.');
      console.error('To resolve this, run:');
      console.error('npm install rc --save');
      // Degrade gracefully: lift with an empty runtime config.
      rc = function () { return {}; };
    }
  }


  // Start server
  sails.lift(rc('sails'));
})();
56 |
--------------------------------------------------------------------------------
/api/controllers/HomeController.js:
--------------------------------------------------------------------------------
1 | /*******************************************
2 | * Copyright 2014, moocRP *
3 | * Author: Kevin Kao *
4 | *******************************************/
5 |
6 | /**
7 | * HomeController
8 | *
9 | * @module :: Controller
10 | * @description :: A set of functions called `actions`.
11 | *
12 | * Actions contain code telling Sails how to respond to a certain type of request.
13 | * (i.e. do stuff, then send some JSON, show an HTML page, or redirect to another URL)
14 | *
15 | * You can configure the blueprint URLs which trigger these actions (`config/controllers.js`)
16 | * and/or override them with custom routes (`config/routes.js`)
17 | *
18 | * NOTE: The code you write here supports both HTTP and Socket.io automatically.
19 | *
20 | * @docs :: http://sailsjs.org/#!documentation/controllers
21 | */
22 |
23 | module.exports = {
24 | /**
25 | * Overrides for the settings in `config/controllers.js`
26 | * (specific to HomeController)
27 | */
28 | _config: {},
29 |
30 | about: function(req, res) {
31 | res.view({
32 | title: 'About'
33 | });
34 | },
35 |
36 | index: function(req, res) {
37 | if (req.session.authenticated) {
38 | res.redirect('/dashboard');
39 | } else {
40 | res.view({
41 | title: 'Home'
42 | });
43 | }
44 | },
45 |
46 | tos: function(req, res) {
47 | res.view({
48 | title: 'Terms of Service'
49 | })
50 | },
51 |
52 | privacy: function(req, res) {
53 | res.view({
54 | title: 'Privacy Policy'
55 | })
56 | },
57 |
58 | license: function(req, res) {
59 | res.view({
60 | title: 'MIT License'
61 | })
62 | },
63 |
64 | contact: function(req, res) {
65 | res.view({
66 | title: 'Contact'
67 | });
68 | }
69 |
70 | };
71 |
--------------------------------------------------------------------------------
/edX-datascrub/src/diffUsers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Examine a set of users.csv files for a class over some weeks, finding any differences
4 |
5 | This program is meant to be run from a directory of weekly dumps from edX. It
6 | finds all sub-directories and then looks for the class number supplied as a
7 | command-line argument. It goes into those class directories, and compares the
8 | users.csv files in those directories, creating a .csv file with the user id and
9 | user name for any user who is in one week and not in the next, or not in the first
10 | week and then in the next.
11 |
12 | Command line arguments
13 | ======================
14 | The name of a class (assumed to be the name of a sub-directory of the current
15 | directory) that is to have the user lists compared
16 |
17 | '''
18 |
19 | import glob
20 | import sys
21 | import csv
22 | import user
23 |
# Collect every weekly users.csv snapshot for the requested course.
course = sys.argv[1]
flist = glob.glob('harvardx*/' + course + '/users.csv')
# A diff needs at least two snapshots.
if len(flist) < 2:
    exit()

# Seed the comparison with the first snapshot.
f = flist.pop(0)
ufile = open(f, 'r')
# BUG FIX: the original passed the raw file object to user.builddict here,
# while every later snapshot is wrapped in csv.reader (see the loop below);
# wrap this first one the same way.
oldDict = user.builddict(csv.reader(ufile))
ufile.close()

out = csv.writer(open(course + 'diffs.csv', 'w'))
for f in flist:
    ufile = open(f, 'r')
    newDict = user.builddict(csv.reader(ufile))
    ufile.close()

    # Users present last week but missing this week.
    out.writerow(['In older course list, not new'])
    i = 0
    for u in iter(oldDict):
        if u not in newDict:
            out.writerow([u, oldDict[u].username])
            i += 1
    out.writerow(['Total deleted between files: ', str(i)])

    # Users present this week but not last week.
    i = 0
    out.writerow(['In new course list, not old'])
    for u in iter(newDict):
        if u not in oldDict:
            out.writerow([u, newDict[u].username])
            i += 1
    out.writerow(['Total added between files : ', str(i)])
    oldDict = newDict
55 |
56 |
57 |
--------------------------------------------------------------------------------
/edX-datascrub/src/demographics/buildAnonProfile.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | Created on Apr 22, 2013
5 |
6 | Create a file of FERPA de-identified demographic data
7 |
8 | The program takes five arguments:
9 | A string that names the course;
10 | A file containing the userprofile data, in CSV format;
11 | A file containing the user data, in CSV format;
12 | A file containing the mapping from usernames to countries;
13 | A file containing the users who received certificates (optional)
14 |
15 |
16 | @author: waldo
17 | '''
18 |
19 | import sys
20 | import csv
21 | import userprofile as prof
22 | import user
23 | import certificates as cs
24 | import ipGeoloc as geo
25 |
26 |
# The script requires the course name plus three input files (the cert file
# is optional), i.e. at least 5 argv entries including the program name.
# BUG FIX: the original tested `len(sys.argv) < 2` but then unconditionally
# read sys.argv[2..4], crashing with IndexError instead of printing usage.
if (len(sys.argv) < 5):
    print('Usage: buildAnonProfile.py courseName profileFile userFile countryFile certFile')
    sys.exit()

# Profile rows can be large; raise the csv field cap.
csv.field_size_limit(1000000)

out_name = sys.argv[1] + 'anonProfile.csv'
o1 = csv.writer(open(out_name, 'w'))

# user id -> profile record
ufile = csv.reader(open(sys.argv[2], 'r'))
uprof = prof.builddict(ufile)

# user id -> user record (provides .username)
udfile = csv.reader(open(sys.argv[3], 'r'))
udict = user.builddict(udfile)

# username -> country
countryFile = csv.reader(open(sys.argv[4], 'r'))
locDict = geo.builddict(countryFile)

# Optional fifth argument: certificate recipients.
certs = False
if (len(sys.argv) > 5):
    certfile = csv.reader(open(sys.argv[5], 'r'))
    certDict = cs.builddict(certfile)
    certs = True


# Emit one de-identified row per profile: demographics, location, and
# (when available) whether the student earned a certificate.
students = uprof.keys()
for s in students:
    p = uprof[s]
    if (s in udict):
        usrName = udict[s].username
        if (usrName in locDict):
            loc = locDict[usrName]
        else:
            loc = ''
    else:
        loc = ''

    if (certs):
        o1.writerow([p.gender, p.yob, p.ledu, p.goal, loc, (p.user_id in certDict)])
    else:
        o1.writerow([p.gender, p.yob, p.ledu, p.goal, loc])
68 |
69 |
70 |
71 |
72 |
--------------------------------------------------------------------------------
/edX-datascrub/src/logs/processLogData.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | Take the log files from all servers between startDate and endDate,
5 | and generate a log file for each course in the given input list.
6 |
7 | Command line arguments:
8 | 1) a list of class names to be processed, seperated by ',' WITHOUT whitespace.
9 | Class name is in school-class-term format. E.g. BerkeleyX-CS191x-Spring_2013.
10 | 2) start date in YYYY-MM-DD format
11 | 3) end date in YYYY-MM-DD format
12 |
13 | @auther: mangpo
14 | '''
15 |
16 | import sys, os, glob
17 |
18 | if __name__ == '__main__':
19 | names = sys.argv[1]
20 | start = sys.argv[2]
21 | end = sys.argv[3]
22 |
23 | (yy1,mm1,dd1) = [int(x) for x in start.split('-')]
24 | (yy2,mm2,dd2) = [int(x) for x in end.split('-')]
25 |
26 | # Clear unknownLogs
27 | logFiles = glob.glob('prod*')
28 | for d in logFiles:
29 | os.system("rm %s/unknownLogs" % d)
30 |
31 | first = True
32 |
33 | # Divide a long period into multiple week periods to reduce sorting time
34 | while yy1 < yy2 or mm1 < mm2 or dd1 < dd2:
35 | (y,m,d) = (yy1,mm1,dd1)
36 | if d >= 31:
37 | if m >= 12:
38 | y = y+1
39 | m = 1
40 | d = 1
41 | else:
42 | m = m+1
43 | d = 1
44 | else:
45 | d = d+7
46 | if y >= yy2 and m >= mm2 and d > mm2:
47 | (y,m,d) = (yy2,mm2,dd2)
48 |
49 | if first:
50 | command = "processSmallLogData.sh %s %04d-%02d-%02d %04d-%02d-%02d" % (names,yy1,mm1,dd1,y,m,d)
51 | first = False
52 | else:
53 | command = "processSmallLogData.sh %s - %04d-%02d-%02d" % (names,y,m,d)
54 |
55 |
56 | print command
57 | status = os.system(command)
58 |
59 | if status == 0:
60 | (yy1,mm1,dd1) = (y,m,d+1)
61 | else:
62 | exit(status)
63 |
64 |
65 |
--------------------------------------------------------------------------------
/edX-datascrub/src/buildCompRoster.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | Build a cumulative class roster for a class
5 |
6 | Run from a directory that contains all of the weekly dumps of data for a class from
7 | edX, this script will build a roster for all students who have ever shown up in the
8 | weekly profiles.csv files. The roster will include student id, name, country, age,
9 | education level, and gender. This should allow us to see all students in a course,
10 | even those that have withdrawn, as long as they remained in the course for a single
11 | data dump.
12 |
13 | Created on Jul 11, 2013
14 | Modified on Jul 28, 2013
15 |
16 | @author: waldo
17 | '''
18 |
19 | import glob
20 | import csv
21 | import sys
22 | import utils
23 | import buildClassRoster as bcr
24 | import demographics.userprofile as prof
25 | import ipGeoloc as geo
26 | import user
27 |
def main(locname, relLoc="./"):
    """Build a cumulative roster from every weekly dump under relLoc.

    locname -- path of the id=>location csv file
    relLoc  -- directory containing the weekly dump folders (named '20*')
    """
    # Profile rows can be very large; lift the csv field size cap.
    csv.field_size_limit(sys.maxsize)
    locD = geo.readIdToLoc(locname)
    # Dump directories start with the year, e.g. 2013-07-11.
    flist = glob.glob(relLoc + '20*')
    # Seed the roster from one dump, then merge the remaining ones.
    fname = flist.pop()
    fin = csv.reader(open(fname + '/profiles.csv', 'r'))
    pDict = prof.builddict(fin)
    uin = csv.reader(open(fname +'/users.csv', 'r'))
    uDict = user.builddict(uin)
    fullR = bcr.buildRosterDict(pDict, uDict, locD)

    for f in flist:
        fin = csv.reader(open(f + '/profiles.csv', 'r'))
        addDict = prof.builddict(fin)
        uin = csv.reader(open(f + '/users.csv', 'r'))
        uDict = user.builddict(uin)
        addR = bcr.buildRosterDict(addDict, uDict, locD)
        # First occurrence wins: only students not already in the roster
        # are added, so withdrawn students remain from earlier dumps.
        for i in iter(addR):
            if i not in fullR:
                fullR[i] = addR[i]

    outname = relLoc + 'FullRoster.csv'
    bcr.writeRoster(fullR, outname)
51 |
if __name__ == '__main__':
    # The location file may come from the command line or from a prompt.
    if len(sys.argv) > 1:
        locname = sys.argv[1]
    else:
        locname = utils.getFileName('Enter name of the id=>location file :')

    main(locname)
59 |
--------------------------------------------------------------------------------
/views/home/privacy.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 |
Privacy Policy
4 |
5 |
6 |
7 |
8 | Your privacy is very important to us. Accordingly, we have developed this Policy in order for you to understand how we collect, use, communicate and disclose and make use of personal information. The following outlines our privacy policy.
9 |
10 |
11 |
12 |
13 | Before or at the time of collecting personal information, we will identify the purposes for which information is being collected.
14 |
15 |
16 | We will collect and use personal information solely with the objective of fulfilling those purposes specified by us and for other compatible purposes, unless we obtain the consent of the individual concerned or as required by law.
17 |
18 |
19 | We will only retain personal information as long as necessary for the fulfillment of those purposes.
20 |
21 |
22 | We will collect personal information by lawful and fair means and, where appropriate, with the knowledge or consent of the individual concerned.
23 |
24 |
25 | Personal data should be relevant to the purposes for which it is to be used, and, to the extent necessary for those purposes, should be accurate, complete, and up-to-date.
26 |
27 |
28 | We will protect personal information by reasonable security safeguards against loss or theft, as well as unauthorized access, disclosure, copying, use or modification.
29 |
30 |
31 | We will make readily available to customers information about our policies and practices relating to the management of personal information.
32 |
33 |
34 |
35 |
36 | We are committed to conducting our business in accordance with these principles in order to ensure that the confidentiality of personal information is protected and maintained.
37 |
38 |
--------------------------------------------------------------------------------
/api/responses/badRequest.js:
--------------------------------------------------------------------------------
1 | /**
2 | * 400 (Bad Request) Handler
3 | *
4 | * Usage:
5 | * return res.badRequest();
6 | * return res.badRequest(data);
7 | * return res.badRequest(data, 'some/specific/badRequest/view');
8 | *
9 | * e.g.:
10 | * ```
11 | * return res.badRequest(
12 | * 'Please choose a valid `password` (6-12 characters)',
13 | * 'trial/signup'
14 | * );
15 | * ```
16 | */
17 |
18 | module.exports = function badRequest(data, options) {
19 |
20 | // Get access to `req`, `res`, & `sails`
21 | var req = this.req;
22 | var res = this.res;
23 | var sails = req._sails;
24 |
25 | // Set status code
26 | res.status(400);
27 |
28 | // Log error to console
29 | if (data !== undefined) {
30 | sails.log.verbose('Sending 400 ("Bad Request") response: \n',data);
31 | }
32 | else sails.log.verbose('Sending 400 ("Bad Request") response');
33 |
34 | // Only include errors in response if application environment
35 | // is not set to 'production'. In production, we shouldn't
36 | // send back any identifying information about errors.
37 | if (sails.config.environment === 'production') {
38 | data = undefined;
39 | }
40 |
41 | // If the user-agent wants JSON, always respond with JSON
42 | if (req.wantsJSON) {
43 | return res.jsonx(data);
44 | }
45 |
46 | // If second argument is a string, we take that to mean it refers to a view.
47 | // If it was omitted, use an empty object (`{}`)
48 | options = (typeof options === 'string') ? { view: options } : options || {};
49 |
50 | // If a view was provided in options, serve it.
51 | // Otherwise try to guess an appropriate view, or if that doesn't
52 | // work, just send JSON.
53 | if (options.view) {
54 | return res.view(options.view, { data: data });
55 | }
56 |
57 | // If no second argument provided, try to serve the implied view,
58 | // but fall back to sending JSON(P) if no view can be inferred.
59 | else return res.guessView({ data: data }, function couldNotGuessView () {
60 | return res.jsonx(data);
61 | });
62 |
63 | };
64 |
65 |
--------------------------------------------------------------------------------
/api/controllers/DataScriptController.js:
--------------------------------------------------------------------------------
1 | /**
2 | * DataScriptController
3 | *
4 | * @description :: Server-side logic for managing Datascripts
5 | * @help :: See http://links.sailsjs.org/docs/controllers
6 | */
7 |
8 | var PATH_CONFIG = sails.config.paths;
9 | var DATA_DROP_PATH = PATH_CONFIG.DATASET_DROP_PATH;
10 | var kue = require('kue');
11 |
12 | module.exports = {
13 | create: function(req, res) {
14 | return res.redirect('/admin/manage_data_scripts');
15 | },
16 | destroy: function(req, res) {
17 | return res.redirect('/admin/manage_data_scripts');
18 | },
19 |
20 | script_test: function(req, res) {
21 | var jobs = QueueService.getQueue();
22 | var job = QueueService.createJob('test job', { user: 1, test: 'testparam' });
23 |
24 | jobs.process('archive job', 1, function (job, done) {
25 | var frames = job.data.frames;
26 |
27 | function next(i) {
28 | if (i > 10) {
29 | done();
30 | } else {
31 | sails.log(i);
32 | next(i+1);
33 | }
34 | }
35 | next(0);
36 | });
37 | return res.json({ 'progress': 'Running' });
38 | },
39 |
40 | // BUILT-IN DATA MANAGEMENT TOOLS
41 | // http://blog.thesparktree.com/post/92465942639/ducktyping-sailsjs-core-for-background-tasks-via
42 | script_archive: function(req, res) {
43 | var jobs = QueueService.getQueue();
44 | var job = QueueService.createJob('test job', { user: 1, test: 'testparam' });
45 |
46 | jobs.process('archive job', 1, function (job, done) {
47 | var frames = job.data.frames;
48 |
49 | function next(i) {
50 | if (i > 10) {
51 | done();
52 | } else {
53 | sails.log(i);
54 | next(i+1);
55 | }
56 | }
57 | next(0);
58 | });
59 | return res.json({ 'progress': 'Running' });
60 | },
61 |
62 | script_move: function(req, res) {
63 | return res.redirect('/admin/manage_data_scripts');
64 | },
65 |
66 | upload: function(req, res) {
67 | return res.redirect('/admin/manage_data_scripts');
68 | }
69 | };
70 |
71 |
--------------------------------------------------------------------------------
/test/SampleController.spec.js:
--------------------------------------------------------------------------------
1 | var request = require('supertest');
2 | var assert = require('assert');
3 | var async = require('async');
4 | var stubs = require('../stubs.js');
5 |
6 |
// Integration tests for the Post endpoints, run against the Sails app
// lifted by test/bootstrap.js (which exposes the global `sails`).
describe('Post', function() {

  // // use after all to create custom stub data
  // var user;
  // before(function(done) {
  //   User.create(uStub)
  //   .exec( function(err, u){
  //     if( err ) {
  //       console.log(err);
  //       return done(err);
  //     }

  //     user = u;
  //     user.password = password;
  //     done();
  //   });
  // });

  describe('Authenticated', function() {
    // use supertest.agent for store cookies ...
    // logged in agent
    var agent ;
    // after authenticated requests login the user
    // before(function(done) {
    //   agent = request.agent(sails.hooks.http.app);
    //   agent.post('/auth/login')
    //   .send({
    //     email: user.email,
    //     password: user.password
    //   })
    //   .end(function(err) {
    //     done(err);
    //   });
    // })

    describe('JSON Requests', function() {
      // Pending spec (no callback): reported as "pending" until implemented.
      it('/relato should create one post with authenticated user as creator');
    });
  })

  describe('UnAuthenticated', function() {
    describe('JSON Requests', function() {
      it('/relato should create one post and return 201',function(done) {

        // POST a stubbed payload and expect it echoed back as JSON.
        var postStub = stubs.postStub();
        request(sails.hooks.http.app)
          .post('/post')
          .send(postStub)
          .set('Accept', 'application/json')
          .expect('Content-Type', /json/)
          .end(function (err, res) {
            if(err) return done(err);

            assert.ok(res.body.title);
            assert.ok(res.body.body);
            assert.equal(res.body.title, postStub.title);
            assert.equal(res.body.body, postStub.body);

            done();
          });
      });
    });
  })

})
--------------------------------------------------------------------------------
/bin/setup/setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
##########################
# Author: Kevin Kao      #
##########################
# moocRP first-time setup: creates the dataset/analytics/log directory
# structure and initializes the MySQL database.
# BUG FIX: the shebang must be the very first line of the file to take
# effect; the original placed it below the author banner.

echo "------------------------------------------------------------------"
echo "| Welcome to moocRP setup. Creating file directory structure... |"
echo "------------------------------------------------------------------"
echo ""

# Grab this script's absolute filepath to its directory
SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

DATASET_FOLDERS=( "encrypted" "extracted" "available" "available/non_pii" "available/pii" )
ANALYTIC_FOLDERS=("tmp" "archives")
LOG_FILES=(production.log development.log)
DATA_SCRIPTS=( "analytics" )

DATABASE_SETUP=db_setup.sql

# Error counter checked at the end of the script.
# BUG FIX: the original declared ERRORS=0 but incremented and tested
# COUNTER, which was never initialized.
COUNTER=0

for folder in "${DATASET_FOLDERS[@]}"; do CREATE="$SCRIPT_PATH/../../../datasets/${folder}"; echo "[x] Creating $CREATE"; mkdir -p "$CREATE"; done
for folder in "${ANALYTIC_FOLDERS[@]}"; do CREATE="$SCRIPT_PATH/../../../analytics/${folder}"; echo "[x] Creating $CREATE"; mkdir -p "$CREATE"; done
for folder in "${DATA_SCRIPTS[@]}"; do CREATE="$SCRIPT_PATH/../../../data_scripts/${folder}"; echo "[x] Creating $CREATE"; mkdir -p "$CREATE"; done


mkdir -p "$SCRIPT_PATH/../../logs"

# Create each log file if missing and verify it is writable.
for files in "${LOG_FILES[@]}"
do
  CREATE="$SCRIPT_PATH/../../logs/${files}"
  echo "[x] Creating $CREATE"

  if [ ! -e "$CREATE" ] ; then
    touch "$CREATE"
  fi

  if [ ! -w "$CREATE" ] ; then
    echo "Unable to write to $CREATE - run this script as sudo"
    COUNTER=$((COUNTER + 1))
    continue
  fi
done

echo ""
echo "Setting up MySQL database..."
printf "Please enter your MySQL username: "
read -e mysqlUser

printf "Please enter your MySQL password: "
read -e -s mysqlPass
echo ""

printf "Ignore WARNING message from MySQL: "
# BUG FIX: the original wrapped this invocation in $(...), which would try
# to execute mysql's *output* as a command; run the client directly, with
# the credentials quoted.
mysql -u "$mysqlUser" -p"$mysqlPass" < "$SCRIPT_PATH/$DATABASE_SETUP"

echo ""

if [[ $COUNTER -gt "0" ]] ; then
  echo "Setup encountered $COUNTER error(s)"
  exit 1
fi
echo "==== Setup was successful ===="
exit 0
66 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "moocRP",
3 | "private": true,
4 | "version": "0.1.5",
5 | "description": "A learning analytics platform for MOOC research.",
6 | "keywords": [
7 | "moocRP",
8 | "analytics",
9 | "edx",
10 | "research",
11 | "visualizations",
12 | "d3"
13 | ],
14 | "dependencies": {
15 | "adm-zip": "latest",
16 | "bcrypt": "latest",
17 | "cheerio": "~0.17.0",
18 | "deasync": "latest",
19 | "ejs": "~0.8.4",
20 | "fs-extra": "latest",
21 | "grunt": "0.4.2",
22 | "grunt-contrib-clean": "~0.5.0",
23 | "grunt-contrib-coffee": "~0.10.1",
24 | "grunt-contrib-concat": "~0.3.0",
25 | "grunt-contrib-copy": "~0.5.0",
26 | "grunt-contrib-cssmin": "~0.9.0",
27 | "grunt-contrib-jst": "~0.6.0",
28 | "grunt-contrib-less": "0.11.1",
29 | "grunt-contrib-uglify": "~0.4.0",
30 | "grunt-contrib-watch": "~0.5.3",
31 | "grunt-sails-linker": "~0.9.5",
32 | "grunt-sync": "~0.0.4",
33 | "include-all": "~0.1.3",
34 | "kue": "latest",
35 | "lodash": "~2.4.1",
36 | "nodemailer": "latest",
37 | "pm2": "latest",
38 | "process": "latest",
39 | "rc": "~0.5.0",
40 | "request": "latest",
41 | "rimraf": "latest",
42 | "sails": "~0.11",
43 | "sails-disk": "~0.10.0",
44 | "sails-mysql": "latest",
45 | "shelljs": "latest",
46 | "shortid": "latest",
47 | "winston": "^0.8.3"
48 | },
49 | "bin": {
50 | "setup": "./bin/setup/setup.sh"
51 | },
52 | "scripts": {
53 | "start": "node app.js",
54 | "debug": "node debug app.js",
55 | "test": "./node_modules/.bin/mocha test/bootstrap.js test/**/*.spec.js"
56 | },
57 | "main": "app.js",
58 | "repository": {
59 | "type": "git",
60 | "url": "git://github.com/kk415kk/moocRP.git"
61 | },
62 | "author": "Kevin Kao ",
63 | "license": "MIT",
64 | "devDependencies": {
65 | "grunt-cli": "latest",
66 | "grunt-contrib-watch": "^0.5.3",
67 | "grunt-mocha-test": "latest",
68 | "mocha": "^1.21.4",
69 | "sinon": "latest",
70 | "async": "latest",
71 | "barrels": "latest",
72 | "assert": "latest",
73 | "supertest": "latest",
74 | "sails-memory": "latest"
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/views/report/index.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 |
Reports
4 |
5 |
6 |
7 |
8 |
9 |
Manage Notices & Reports
10 |
11 |
12 |
13 |
14 |
15 |
16 |
Reports
17 |
All reports by users (and non-users) will appear here, as well as in the inbox of the email address registered with this moocRP instance. Currently, messages can only be responded to through external email inboxes. Integration of replies, etc. is currently being developed. - 1/16/15
--------------------------------------------------------------------------------
/edX-datascrub/src/demographics/getenrollmentdata.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | This program will construct a table of when the students of a course registered,
4 | and will track the self-reported gender of those students. The result is a csv
5 | file that contains the date of enrollment, the number of self-reported males,
6 | the number of self-reported females, and the number who did not report, along
7 | with the total number that registered on that day. A final entry will give
8 | totals for the three possible gender choices (M, F, no report) and the total for
9 | the course.
10 |
11 | Usage:
12 | getenrollmentdata.py file1 file2 file3
13 |
14 | where:
15 | file1 is a csv representation of the student demographic information (user_profile),
16 | file2 is a csv representation of the course enrollment date (courseenrollent)
17 | file3 is a csv file that will be written with the enrollment information by day
18 |
19 | Created on Feb 18, 2013
20 |
21 | @author: waldo
22 | '''
23 |
24 | import userprofile
25 | import csv
26 | import sys
27 | import logging
28 |
# NOTE(review): this guard is a no-op (`pass`); the real work runs at module
# scope below, so merely importing this module executes the whole script.
if __name__ == '__main__':
    pass
31 |
class enrollday:
    """Per-day enrollment tally.

    Attributes:
        m -- count of self-reported males
        f -- count of self-reported females
        n -- count with no reported gender
        t -- total enrollments for the day
    """

    def __init__(self):
        # Every counter starts at zero; callers increment them directly.
        self.m = self.f = self.n = self.t = 0
38 |
39 |
40 |
def buildenrolldict(e, profiles):
    """Build {day: enrollday} from enrollment rows.

    e        -- iterable of CSV rows: [row_id, user_id, course_id, datetime]
    profiles -- {user_id: profile} where profile.gender is 'm', 'f', or other

    Rows of the wrong width are logged and skipped; rows whose user has no
    profile entry are silently skipped.
    """
    retdict = {}
    lineno = 0
    for line in e:
        lineno += 1
        if len(line) != 4:
            # Lazy %-args: the message is only formatted if actually emitted.
            logging.warning('bad enrollment line at %d', lineno)
            continue
        rid, uid, cid, date = line
        if uid not in profiles:
            continue
        day = date[:date.find(' ')]  # keep the date part, drop time-of-day
        gender = profiles[uid].gender
        if day not in retdict:
            retdict[day] = enrollday()
        if gender == 'm':
            retdict[day].m += 1
        elif gender == 'f':
            retdict[day].f += 1
        else:
            retdict[day].n += 1
        retdict[day].t += 1
    return retdict
64 |
65 |
# --- Script entry: profiles CSV + enrollment CSV in; per-day summary out. ---
csv.field_size_limit(1000000000)  # demographic dumps can contain huge fields

# sys.argv[1]: user_profile CSV -> {user_id: profile}.
# `with` closes each handle (the original leaked all three).
with open(sys.argv[1], 'r') as pf:
    profdict = userprofile.builddict(csv.reader(pf))

# sys.argv[2]: enrollment CSV -> {day: enrollday}
with open(sys.argv[2], 'r') as ef:
    enrdict = buildenrolldict(csv.reader(ef), profdict)

# sys.argv[3]: output CSV -- one row per enrollment day, plus a totals row.
with open(sys.argv[3], 'w') as of:
    outfile = csv.writer(of)
    mt = ft = nt = 0
    outfile.writerow(['Enroll Date', 'Male', 'Female', 'Unspecified', 'Total'])
    for date in sorted(enrdict):
        rec = enrdict[date]
        outfile.writerow([date, rec.m, rec.f, rec.n, rec.t])
        mt += rec.m
        ft += rec.f
        nt += rec.n

    outfile.writerow(['', mt, ft, nt, mt + ft + nt])
83 |
84 |
--------------------------------------------------------------------------------
/api/responses/serverError.js:
--------------------------------------------------------------------------------
1 | /**
2 | * 500 (Server Error) Response
3 | *
4 | * Usage:
5 | * return res.serverError();
6 | * return res.serverError(err);
7 | * return res.serverError(err, 'some/specific/error/view');
8 | *
9 | * NOTE:
10 | * If something throws in a policy or controller, or an internal
11 | * error is encountered, Sails will call `res.serverError()`
12 | * automatically.
13 | */
14 |
15 | module.exports = function serverError (data, options) {
16 |
17 | // Get access to `req`, `res`, & `sails`
18 | var req = this.req;
19 | var res = this.res;
20 | var sails = req._sails;
21 |
22 | // Set status code
23 | res.status(500);
24 |
25 | // Log error to console
26 | if (data !== undefined) {
27 | sails.log.error('Sending 500 ("Server Error") response: \n',data);
28 | }
29 | else sails.log.error('Sending empty 500 ("Server Error") response');
30 |
31 | // Only include errors in response if application environment
32 | // is not set to 'production'. In production, we shouldn't
33 | // send back any identifying information about errors.
34 | if (sails.config.environment === 'production') {
35 | data = undefined;
36 | }
37 |
38 | // If the user-agent wants JSON, always respond with JSON
39 | if (req.wantsJSON) {
40 | return res.jsonx(data);
41 | }
42 |
43 | // If second argument is a string, we take that to mean it refers to a view.
44 | // If it was omitted, use an empty object (`{}`)
45 | options = (typeof options === 'string') ? { view: options } : options || {};
46 |
47 | // If a view was provided in options, serve it.
48 | // Otherwise try to guess an appropriate view, or if that doesn't
49 | // work, just send JSON.
50 | if (options.view) {
51 | return res.view(options.view, { data: data });
52 | }
53 |
54 | // If no second argument provided, try to serve the default view,
55 | // but fall back to sending JSON(P) if any errors occur.
56 | else return res.view('500', { data: data }, function (err, html) {
57 |
58 | // If a view error occured, fall back to JSON(P).
59 | if (err) {
60 | //
61 | // Additionally:
62 | // • If the view was missing, ignore the error but provide a verbose log.
63 | if (err.code === 'E_VIEW_FAILED') {
64 | sails.log.verbose('res.serverError() :: Could not locate view for error page (sending JSON instead). Details: ',err);
65 | }
66 | // Otherwise, if this was a more serious error, log to the console with the details.
67 | else {
68 | sails.log.warn('res.serverError() :: When attempting to render error page view, an error occured (sending JSON instead). Details: ', err);
69 | }
70 | return res.jsonx(data);
71 | }
72 |
73 | return res.send(html);
74 | });
75 |
76 | };
77 |
78 |
--------------------------------------------------------------------------------
/views/user/edit.ejs:
--------------------------------------------------------------------------------
1 |
1:
81 | os.chdir(sys.argv[1])
82 | flist = glob.glob('*.mongo')
83 | clist = reduceName(flist)
84 | ofile = open('weeklyClassList', 'w')
85 | writeList(ofile, clist)
86 |
87 |
--------------------------------------------------------------------------------
/edX-datascrub/src/logs/buildWeekLog.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Combine all of the log files in a directory into a single log file
4 |
5 | This script assumes that all of the log files to be combined are in a
6 | single directory, and that the name of the directory is the name of the
7 | class. The program will combine all of the files into a single file, with
8 | the lines in timestamp order. While unlikely, it is possible that multiple
9 | lines have the same timestamp; if that happens they will be ordered in the
10 | order they are seen when read.
11 |
12 | The main use for this script is to combine all of the files for a given
13 | week (as obtained from edX) into a file for that week. However, if multiple
14 | weeks log files are in the directory, they will all be combined.
15 | '''
16 |
17 | import json
18 | import glob
19 | import csv
20 | import sys
21 | import os
22 |
def buildClassList():
    """Read ClassList.csv (course,count rows) and return an iterator over
    the distinct course names in first-seen order.

    The file handle is closed deterministically (the original leaked it),
    and mode 'r' replaces the removed-in-Python-3.11 'rU'.
    """
    classes = []
    with open('ClassList.csv', 'r') as cin:
        for course, count in csv.reader(cin):
            if course not in classes:
                classes.append(course)
    return iter(classes)
31 |
def combineLogs(className, logFiles):
    """Merge JSON log lines from every file in logFiles into a dict
    mapping timestamp -> list of raw lines (in read order per timestamp).

    className is unused but kept for interface compatibility.
    Lines that fail to parse as JSON are reported and skipped.
    """
    lineDict = {}
    dc = json.JSONDecoder()
    for fname in logFiles:
        # `with` closes each handle (the original leaked them).
        with open(fname, 'r') as inf:
            # enumerate from 1 so reported line numbers stay correct even
            # after a decode error: the original only advanced its counter
            # on success, so numbers drifted after the first bad line.
            for lineNo, line in enumerate(inf, 1):
                try:
                    dcl = dc.decode(line)
                except ValueError:
                    # Parenthesized single-arg print works on Python 2 and 3.
                    print('JSON error at line %d' % lineNo)
                    continue
                lineDict.setdefault(dcl['time'], []).append(line)
    return lineDict
51 |
def writeCombLog(fname, log):
    """Append the timestamp-ordered contents of `log` to `fname`.

    log maps timestamp -> list of raw lines. Within each timestamp the
    lines are sorted and consecutive duplicates are dropped. Prints a
    summary of how many lines were written.
    """
    if not log:
        print('Nothing to write for log %s' % fname)
        return
    written = 0
    # Append mode preserved from the original; `with` closes on all paths.
    with open(fname, 'a') as outfile:
        for ts in sorted(log):
            previous = None
            for entry in sorted(log[ts]):
                if entry != previous:
                    written += 1
                    outfile.write(entry)
                    previous = entry
    # Single formatted string prints identically on Python 2 and 3
    # (the original used Python-2-only multi-argument print statements).
    print('wrote %d lines to output file %s' % (written, fname))
    print('-----------------')
68 |
if __name__ == '__main__':
    # Course name, e.g. 'HarvardX-CS50x', given on the command line.
    cl = sys.argv[1]

    print('about to process logs for %s' % cl)
    # One log per dump directory: prod*/<course>-<date>...
    logFiles = glob.glob('prod*/' + cl + '-*')
    print(logFiles)
    prodLogs = []
    for f in logFiles:
        print('processing log %s' % f)
        prodLogs.append(f)

    prodDict = combineLogs(cl, prodLogs)
    writeCombLog(cl + '.log', prodDict)

    # Remove the per-dump inputs now that they are merged. os.remove avoids
    # spawning a shell per file (and breaking on filenames with spaces),
    # unlike the original os.system("rm %s" % f).
    for f in logFiles:
        os.remove(f)
85 |
86 |
87 |
--------------------------------------------------------------------------------
/api/responses/notFound.js:
--------------------------------------------------------------------------------
1 | /**
2 | * 404 (Not Found) Handler
3 | *
4 | * Usage:
5 | * return res.notFound();
6 | * return res.notFound(err);
7 | * return res.notFound(err, 'some/specific/notfound/view');
8 | *
9 | * e.g.:
10 | * ```
11 | * return res.notFound();
12 | * ```
13 | *
14 | * NOTE:
15 | * If a request doesn't match any explicit routes (i.e. `config/routes.js`)
16 | * or route blueprints (i.e. "shadow routes", Sails will call `res.notFound()`
17 | * automatically.
18 | */
19 |
20 | module.exports = function notFound (data, options) {
21 |
22 | // Get access to `req`, `res`, & `sails`
23 | var req = this.req;
24 | var res = this.res;
25 | var sails = req._sails;
26 |
27 | // Set status code
28 | res.status(404);
29 |
30 | // Log error to console
31 | if (data !== undefined) {
32 | sails.log.verbose('Sending 404 ("Not Found") response: \n',data);
33 | }
34 | else sails.log.verbose('Sending 404 ("Not Found") response');
35 |
36 | // Only include errors in response if application environment
37 | // is not set to 'production'. In production, we shouldn't
38 | // send back any identifying information about errors.
39 | if (sails.config.environment === 'production') {
40 | data = undefined;
41 | }
42 |
43 | // If the user-agent wants JSON, always respond with JSON
44 | if (req.wantsJSON) {
45 | return res.jsonx(data);
46 | }
47 |
48 | // If second argument is a string, we take that to mean it refers to a view.
49 | // If it was omitted, use an empty object (`{}`)
50 | options = (typeof options === 'string') ? { view: options } : options || {};
51 |
52 | // If a view was provided in options, serve it.
53 | // Otherwise try to guess an appropriate view, or if that doesn't
54 | // work, just send JSON.
55 | if (options.view) {
56 | return res.view(options.view, { data: data });
57 | }
58 |
59 | // If no second argument provided, try to serve the default view,
60 | // but fall back to sending JSON(P) if any errors occur.
61 | else return res.view('404', { data: data }, function (err, html) {
62 |
63 | // If a view error occured, fall back to JSON(P).
64 | if (err) {
65 | //
66 | // Additionally:
67 | // • If the view was missing, ignore the error but provide a verbose log.
68 | if (err.code === 'E_VIEW_FAILED') {
69 | sails.log.verbose('res.notFound() :: Could not locate view for error page (sending JSON instead). Details: ',err);
70 | }
71 | // Otherwise, if this was a more serious error, log to the console with the details.
72 | else {
73 | sails.log.warn('res.notFound() :: When attempting to render error page view, an error occured (sending JSON instead). Details: ', err);
74 | }
75 | return res.jsonx(data);
76 | }
77 |
78 | return res.send(html);
79 | });
80 |
81 | };
82 |
83 |
--------------------------------------------------------------------------------
/config/i18n.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Internationalization / Localization Settings
3 | * (sails.config.i18n)
4 | *
5 | * If your app will touch people from all over the world, i18n (or internationalization)
6 | * may be an important part of your international strategy.
7 | *
8 | *
 * For more information on i18n in Sails, check out:
10 | * http://sailsjs.org/#/documentation/concepts/Internationalization
11 | *
12 | * For a complete list of i18n options, see:
13 | * https://github.com/mashpie/i18n-node#list-of-configuration-options
14 | *
15 | *
16 | */
17 |
/**
 * i18n settings — every option below is a commented-out default;
 * uncomment and edit to override Sails' built-in behavior.
 */
module.exports.i18n = {

  // Locales this app supports.
  // locales: ['en', 'es', 'fr', 'de']

  // Fallback locale for requests without an "Accept-Language" header
  // (browsers send one; non-browser clients like cURL may not).
  // defaultLocale: 'en',

  // Add newly-encountered translation keys to locale files automatically?
  // updateFiles: false,

  // Directory (relative to the app root) holding locale (translation) files.
  // localesDirectory: '/config/locales'

};
58 |
--------------------------------------------------------------------------------
/edX-datascrub/src/coursestudentstate.py:
--------------------------------------------------------------------------------
1 | '''
2 | Object definition and utility funcitons for the course student state data dumps
3 |
4 | Contains a definition of a coursestudentstate object, which contains all of the
5 | information exported in the coursestudentstate files from edx. Also contains a
6 | function allowing the construction of a dictionary, indexed by the student id,
7 | that maps to this state. As usual, there is also a function that will scrub the
8 | file of entries that are the wrong size.
9 |
10 | Created on Feb 24, 2013
11 |
12 | @author: waldo
13 | '''
14 |
15 | import convertfiles.xmltocsv
16 |
class coursestudentstate(object):
    '''
    A representation of the state of student work in a course

    This object encapsulates the work a student has done in any edX
    course, and the course state itself. Attribute names mirror the
    columns of the coursestudentstate dump, except that the `mod`
    argument is stored as `modified`.
    '''


    def __init__(self, sid, mod_type, mod_id, student_id, state, grade, created, mod,
                 max_grade, done, course_id):
        '''
        Store each exported column on the instance, unmodified.
        '''
        fields = (
            ('sid', sid),
            ('mod_type', mod_type),
            ('mod_id', mod_id),
            ('student_id', student_id),
            ('state', state),
            ('grade', grade),
            ('created', created),
            ('modified', mod),
            ('max_grade', max_grade),
            ('done', done),
            ('course_id', course_id),
        )
        for name, value in fields:
            setattr(self, name, value)
44 |
def builddict(f, ptype = ''):
    '''
    Build a dictionary, indexed by the state id, of the course state

    This function builds a dictionary of the student state of a course.
    Since the state is large, this also allows building a dictionary of
    only one part of the state, determined by the ptype that is handed
    in (rows whose mod_type differs from a non-empty ptype are skipped).
    Rows without exactly 11 fields are reported and skipped.
    '''
    retdict = {}
    lineno = 0
    for line in f:
        lineno += 1
        if len(line) != 11:
            # Parenthesized single-arg print works identically on Python 2
            # and 3; the original used a Python-2-only print statement.
            print('bad row size at line ' + str(lineno))
            continue
        [sid, modt, modi, st_id, state, gr, cr, modif, mgr, done, c_id] = line
        if (ptype != '') and (modt != ptype):
            continue
        retdict[sid] = coursestudentstate(sid, modt, modi, st_id, state, gr,
                                          cr, modif, mgr, done, c_id)

    return retdict
69 |
def scrubcsstate(f1, f2):
    '''
    Clean up a csv file representation of the student state

    This function will traverse a csv file representation of the student
    course state, removing any entries that do not have the expected 11
    fields (which can happen because of bad or dirty input). Delegates
    the actual work to convertfiles.xmltocsv.scrubcsv, reading from f1
    and writing the surviving rows to f2.
    '''
    convertfiles.xmltocsv.scrubcsv(f1, f2, 11)
79 |
--------------------------------------------------------------------------------
/edX-datascrub/src/buildCertList.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Builds a dictionary of all of the certificates a user has been
4 | considered for, indexed by the user id.
5 |
6 | It is given a directory which has all of the courses. Each course
7 | directory should have at least one date dump
8 |
9 | Writes a FullCertList.json file to the given directory
10 |
11 | Created on October 11, 2013
12 |
13 | @author: lazovich
14 | '''
15 |
16 | import certificates
17 | from certificates import cert, CertEncoder
18 | import csv
19 | import sys
20 | import glob
21 | import json
22 |
23 |
def processCerts(dir):
    '''
    Construct a dictionary of certificate recipients for a course,
    given the directory of the certificates.csv file

    Parameters
    -----------
    dir: A string corresponding to the directory of the certificates.csv

    Returns the dictionary, or None when the directory has no
    certificates.csv file.
    '''

    try:
        f = open(dir + "/certificates.csv", 'r')
    except IOError:
        # A missing certificates.csv just means this dump has none.
        return None

    # Close the handle once parsed; the original leaked it.
    try:
        infile = csv.reader(f)
        certDict = certificates.builddict(infile)
    finally:
        f.close()

    return certDict
43 |
44 |
def mergeCertDicts(dict1, dict2):
    '''
    Merge dict2 into dict1 in place (and return dict1).

    Keys only in dict2 are copied over. When a key exists in both, the
    values are combined into a list: a bare cert becomes a two-element
    list, while an existing list simply grows by one.
    '''

    for key in dict2:
        if key not in dict1:
            dict1[key] = dict2[key]
            continue

        existing = dict1[key]
        incoming = dict2[key]

        if isinstance(existing, cert):
            # First collision for this key: start a list of certs.
            dict1[key] = [existing, incoming]
        else:
            # Already a list from an earlier merge: extend it in place.
            existing.append(incoming)
            dict1[key] = existing

    return dict1
72 |
73 |
def main():
    """Scan course directories under sys.argv[1], merge the certificates
    from every dated dump, and write FullCertList.json to that directory.

    Returns 1 on usage error, None on success.
    """
    if len(sys.argv) != 2:
        # Parenthesized print works on Python 2 and 3.
        print("Usage: buildCertList.py dir_name")
        return 1

    indir = sys.argv[1]
    dirList = glob.glob(indir + "/" + "*x*20*")

    allCerts = {}

    # Iterate over all courses
    for dir in dirList:
        # Each dated subdirectory is one data dump.
        fList = glob.glob(dir + "/" + "*20*")

        # Iterate over all dumps
        allCourseCerts = {}

        for f in fList:
            certDict = processCerts(f)

            # processCerts returns None when the dump has no
            # certificates.csv -- skip it instead of crashing on
            # update(None), as the original did.
            if certDict is not None:
                # Overwrites cert from earlier dumps
                # if user is already there
                allCourseCerts.update(certDict)

        mergeCertDicts(allCerts, allCourseCerts)

    # `with` closes the output file on all paths (the original leaked it).
    with open(indir + "/" + "FullCertList.json", 'w') as outfile:
        outfile.write(json.dumps(allCerts, cls=CertEncoder))
102 |
103 |
# Standard script entry point; main()'s status code is not propagated
# to the shell here.
if __name__ == '__main__':
    main()
106 |
--------------------------------------------------------------------------------
/documentation/data_distribution.md:
--------------------------------------------------------------------------------
1 | [Back to README](../README.md)
2 |
3 | Data Distribution
4 | ================
This page is a work in progress. You can contribute to it by forking this repository and making pull requests.
6 |
7 | Data distribution is the process of distributing the data dumps of various MOOCs. Essentially, for each course's data that you want to make available, the following steps should be followed (suppose the course was BerkeleyX-CS169.1.x):
8 |
9 | * Provided you have the dataset saved in a folder somewhere already, transfer it to the server that moocRP is on.
10 | * Archive the contents of the folder (the data for the course) into a .ZIP file, where the name of the archive is the name of the course, i.e. BerkeleyX-CS169.1.x.zip.
11 | * Move the archive to `moocRP_base/datasets/available/non_pii` or `moocRP_base/datasets/available/pii`, whichever is relevant.
12 |
13 | Then, the dataset will appear on the dashboard page of moocRP, available for a user to request.
14 |
15 | Analytics Data
16 | ==============
17 | To make a dataset available to be selected from the Analytics tab of moocRP, the following steps should be followed:
18 |
19 | * Using the same name as the archive for data distribution (minus the .ZIP at the end), create a folder in `moocRP_base/datasets/extracted/DATA_MODEL_NAME/` named after the archive name.
20 | * Extract the files of the dataset archive into the newly created folder. Make sure all the files are in the new folder and NOT nested inside some other folder, i.e. `moocRP_base/datasets/extracted/DATA_MODEL_NAME/ARCHIVE_NAME` should be as deep as the hierarchy of folders goes.
21 |
22 | The datasets should then appear in the Analytics tab, available for users to choose from if they've been granted access to the dataset.
23 |
24 | IMPORTANT NOTE: the name of the archive file for data distribution, the folder name for analytics data MUST be consistent and must be the name of the course offering. moocRP relies on the names of the folders and archive files to detect which datasets are referring to the same course offering.
25 |
26 | Example:
27 | ```
28 | moocRP_base
29 | - datasets
30 | - keys
31 | - moocRP
32 | - available // for data distribution
33 | - non_pii
34 | - data_model_1
35 | - BerkeleyX-CS169.1x.zip // a dataset that's archived into a ZIP file
36 | - data_model_2
37 | - BerkeleyX-CS169.1x.zip // same dataset as above, but scrubbed into a diff format
38 | - pii
39 | ...
40 | - extracted // for analytics
41 | - data_model_1
42 | - BerkeleyX-CS169.1x // folder name is the same as the archive
43 | - data_model_1_data_file1
44 | - data_model_1_data_file2
45 | - data_model_2
46 | - BerkeleyX-CS169.1x
47 | - data_model_2_data_file1
48 | - data_model_2_data_file2
49 | - data_model_2_data_file3
50 | - encrypted
51 | ...
52 | ```
--------------------------------------------------------------------------------
/views/datamodel/info.ejs:
--------------------------------------------------------------------------------
1 |
This page contains information about each data model: its files, the file schemas, and other information. Basic module development resources can also be found here. If anything is out of date, please contact us below.
63 |
64 |
--------------------------------------------------------------------------------
/edX-datascrub/src/checkData/checkUsersTimes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Compare the user files for a class over multiple weeks, and determine if any of those not
4 | active in both weeks were awarded a certificate, and the date of enrollment of those who
5 | are in one list but not the other
6 |
7 | Looks at the user, enrollment, and certificate files in multiple directories for a class,
8 | and produces a list of those users that are in one user list and not the other. For those
9 | users, the script looks into the enrollment file to determine when the student enrolled,
10 | and at the certificates file to see if the student was awarded a certificate.
11 |
12 | Currently, the class for which this is done is specified as a command-line argument. The
13 | weeks compared is hard-coded.
14 |
15 | '''
16 |
17 | import os
18 | import sys
19 | import csv
20 | import user
21 | import certificates
22 | import course_enrollment
23 |
def compareUsers(d1, d2):
    """Return {user_id: 'n'} for every key of d1 that is absent from d2.

    The 'n' marker means "no certificate"; callers flip it to 'y' after
    consulting the certificates data.
    """
    return {u: 'n' for u in d1 if u not in d2}
30 |
# Course to check (e.g. 'CS50x'), supplied on the command line.
ck_course = sys.argv[1]

# The two weekly dump directories being compared (hard-coded).
dump2 = 'harvardx-2013-06-16'
dump1 = 'harvardx-2013-06-02'
# Per-course file paths inside each dump directory.
userFile = '/' + ck_course + '/users.csv'
certFile = '/' + ck_course + '/certificates.csv'
enroll = '/' + ck_course + '/enrollment.csv'
# CSV readers for users, certificates, and enrollment in both dumps.
# NOTE(review): these handles are never closed explicitly; they are
# only reclaimed when the script exits.
uf1 = csv.reader(open(dump1 + userFile, 'r'))
uf2 = csv.reader(open(dump2 + userFile, 'r'))
cf1 = csv.reader(open(dump1 + certFile, 'r'))
cf2 = csv.reader(open(dump2 + certFile, 'r'))
ef1 = csv.reader(open(dump1 + enroll, 'r'))
ef2 = csv.reader(open(dump2 + enroll, 'r'))

# Dictionaries keyed by user id, built by the project's helper modules.
u1dict = user.builddict(uf1)
u2dict = user.builddict(uf2)
c1dict = certificates.builddict(cf1)
c2dict = certificates.builddict(cf2)
e1dict = course_enrollment.builddict(ef1)
e2dict = course_enrollment.builddict(ef2)

# Users present in one dump but not the other, initially marked 'n'
# (no certificate).
OneNotTwo = compareUsers(u1dict, u2dict)
TwoNotOne = compareUsers(u2dict, u1dict)

# Flip the marker to 'y' for users whose certificate was downloadable.
for u in iter(OneNotTwo):
    if u in c1dict and c1dict[u].status =='downloadable':
        OneNotTwo[u] = 'y'

for u in iter(TwoNotOne):
    if u in c2dict and c2dict[u].status == 'downloadable':
        TwoNotOne[u] = 'y'

# Write both difference lists -- with certificate status and (when known)
# the enrollment date -- to a per-course CSV report.
outfile = csv.writer(open('userDiff06020616' + ck_course +'.csv', 'w'))
outfile.writerow(['Users in 06/02 list but not in 06/16 list'])
outfile.writerow(['User id', 'Certificate granted', 'Date enrolled'])
for u in iter(OneNotTwo):
    if u in e1dict:
        signdate = e1dict[u].enroll_d
    else:
        signdate = ''
    outfile.writerow([u, OneNotTwo[u], signdate])

outfile.writerow(['Users in 06/16 list but not in 06/02 list'])
outfile.writerow(['User id', 'Certificate granted', 'Date enrolled'])
for u in iter(TwoNotOne):
    if u in e2dict:
        signdate = e2dict[u].enroll_d
    else:
        signdate = ''
    outfile.writerow([u, TwoNotOne[u], signdate])
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
/test/stubs.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Stubs for use in tests
 * A GPG key must be generated for each test user; the sample public key below stands in for one.
4 | */
// Container object for all shared test fixtures.
var stubs = {};

/*************************
 **  DATA MODEL STUBS  **
 *************************/

// Each stub returns a FRESH object per call so tests can mutate freely.
stubs.dataModelStub = function () {
  return {
    displayName: 'Test Data Model',
    fileSafeName: 'test_data_model'
  };
};

stubs.dataModelStub2 = function () {
  return {
    displayName: 'Test Data Model 2',
    fileSafeName: 'test_data_model_2'
  };
};

/**************************
 **     USER STUBS       **
 **************************/
stubs.userStub = function () {
  return {
    id: '111111',
    firstName: 'John',
    lastName: 'Johnson',
    email: 'john.johnson@berkeley.edu',
    publicKey: testGPGKey,
    publicKeyID: '13779E47'
  };
};

stubs.userStub2 = function () {
  return {
    id: '222222',
    firstName: 'Kevin',
    lastName: 'Chan',
    email: 'kevin.chan@berkeley.edu',
    publicKey: testGPGKey,
    publicKeyID: '13779E47'
  };
};

// Sample armored PGP public key shared by the user stubs.
// NOTE(review): the segments are joined with '' (no newlines), matching the
// original concatenation -- so this string is NOT directly importable by
// GPG as ASCII armor; confirm whether the consuming code re-wraps it.
var testGPGKey = [
  "-----BEGIN PGP PUBLIC KEY BLOCK-----",
  "Version: GnuPG v1",
  "",
  "mQENBFSdIf4BCADATizHrS35lAsfqgI2PkvQoTUZSq+T0eSjmDjw0hrSChC+UpzW",
  "l8Vqr/hVz3nhT9yVP+pyQhaWXDlGERmWej7rYW0JickunYvhM8D0RLRe7eXAYHQG",
  "lWZimdOs9jarcnfh1cB94wgT8bgcEvgwmnHDpKwF/iVL7XTpUbwjGuQ5uNYqwM1w",
  "nFpgr9tF9KRqqUJOpdzrr9LeIn+FtbP6l/WrYBn4+mmC203JXaV+7B0HQjHNIKUT",
  "k4jJSrrMEmb2IkRRur5wA3f7QTDtb5XBkEpCrL8CA74LE8WXopK1fQpb82OmnqUV",
  "OSrUdpzqRZk6UgBga5JCjkTVeFirtpJdxkm1ABEBAAG0OkpvaG4gSm9obnNvbiAo",
  "bW9vY1JQIHRlc3Qga2V5KSA8am9obi5qb2huc29uQGJlcmtlbGV5LmVkdT6JATgE",
  "EwECACIFAlSdIf4CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJECGa274T",
  "d55HqtUIAJQkxNYs7OKBPRYxVBURD1A9XSKAJBoJPSfEo95Gh8kCZRTK0yvoFVT4",
  "vZBclFLIsg4mJ69sYboXRha4onpdOQkriCP4nG+kIgpcKO6fBYthvCsAUpR0Rz22",
  "+BOBZFATYcytbSaKqhDg80F++MbDUd8ImiCbWtIfjfWRydn+YXatJ36zkX3wiJsh",
  "wKOsvlcxwlc5V4MfPM2P7mRgYr+ger+FutBldPSdfsH+yJuAlQC3D2rw4q2SekS/",
  "ud4baPnTD2+q6xNNQcP69/zUkz2q8Y0pmYqjDis9Cr3pWOJUgZ3vcUF3adRynMLC",
  "8kGHwfHZDhfTnbfNuDXbM2/JTMTIYse5AQ0EVJ0h/gEIAJ5DfkkkG7kGry77A0Y0",
  "lKsm0FMeshhdH7FHfpTdtRhPLamZVtUxABETbtTiaU7n4H4PxWmZ7CZriZpoZcf8",
  "2+58JVhx2aNmJYjizdoY28XyOT+E+JhQgU7uxwUOaeGK0l3JU9fr0ynBCXY/Zv/M",
  "/jsx6qFueYPVHZzHsdD1hZ9Sp3OdCKIlptbWq6fwnN0ZvehvHuzpNM6ybH9d8+LT",
  "usZYQ49a2YUqHvsAKtHiEPOIXjj5D13r1sMXDnzowBsgRL3ARqYLphGUe6xwYpDa",
  "qy5Z4Rj7LSjTd/koYL0gFLSYPFIBWxneWMsK45OYCo2/t6aXrjKYtbghgzV7hA85",
  "MlUAEQEAAYkBHwQYAQIACQUCVJ0h/gIbDAAKCRAhmtu+E3eeR1JpB/wOKl/rwVLn",
  "PFFp8ikrNZQaHG1Ycch9mX0UXjoaKlpta7W67dP2MBZmhMDK6fu9qHHpXeS7MwWp",
  "jshZ3c30dOSNbeP/FkWOfg7vjHeix3KoeoaQoBmVO0WreWjm8JTsCtN7dOQQG1Ui",
  "nbd44hBdQrnwudMl7kGV/kDYdQql5Luqtqlnzenthc+eR67DLTD7Q/K0HtZduvz3",
  "h1BN1GSrkentod23Jp8+H0HXtEO3ddzl9qMWAlOjVJneSW5fCZMPlpsr8LHZf3mV",
  "zp9YQwspPuwiPooKLKm775J6s+BrFlsszXo5DF8XDmzTMSl9SSqxQB1O93YUJzHr",
  "Ai/xmb5/2vO0",
  "=r0uC",
  "-----END PGP PUBLIC KEY BLOCK-----"
].join("");

module.exports = stubs;
--------------------------------------------------------------------------------
/config/csrf.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Cross-Site Request Forgery Protection Settings
3 | * (sails.config.csrf)
4 | *
5 | * CSRF tokens are like a tracking chip. While a session tells the server that a user
6 | * "is who they say they are", a csrf token tells the server "you are where you say you are".
7 | *
8 | * When enabled, all non-GET requests to the Sails server must be accompanied by
9 | * a special token, identified as the '_csrf' parameter.
10 | *
11 | * This option protects your Sails app against cross-site request forgery (or CSRF) attacks.
12 | * A would-be attacker needs not only a user's session cookie, but also this timestamped,
13 | * secret CSRF token, which is refreshed/granted when the user visits a URL on your app's domain.
14 | *
15 | * This allows us to have certainty that our users' requests haven't been hijacked,
16 | * and that the requests they're making are intentional and legitimate.
17 | *
18 | * This token has a short-lived expiration timeline, and must be acquired by either:
19 | *
20 | * (a) For traditional view-driven web apps:
21 | * Fetching it from one of your views, where it may be accessed as
22 | * a local variable, e.g.:
23 | *
26 | *
27 | * or (b) For AJAX/Socket-heavy and/or single-page apps:
28 | * Sending a GET request to the `/csrfToken` route, where it will be returned
29 | * as JSON, e.g.:
30 | * { _csrf: 'ajg4JD(JGdajhLJALHDa' }
31 | *
32 | *
33 | * Enabling this option requires managing the token in your front-end app.
34 | * For traditional web apps, it's as easy as passing the data from a view into a form action.
35 | * In AJAX/Socket-heavy apps, just send a GET request to the /csrfToken route to get a valid token.
36 | *
37 | * For more information on CSRF, check out:
38 | * http://en.wikipedia.org/wiki/Cross-site_request_forgery
39 | *
40 | * For more information on this configuration file, including info on CSRF + CORS, see:
41 | * http://beta.sailsjs.org/#/documentation/reference/sails.config/sails.config.csrf.html
42 | *
43 | */
44 |
/****************************************************************************
 *                                                                          *
 * Enable CSRF protection for your site?                                    *
 *                                                                          *
 ****************************************************************************/
50 |
51 | module.exports.csrf = true;
52 |
53 | /****************************************************************************
54 | * *
55 | * You may also specify more fine-grained settings for CSRF, including the *
56 | * domains which are allowed to request the CSRF token via AJAX. These *
57 | * settings override the general CORS settings in your config/cors.js file. *
58 | * *
59 | ****************************************************************************/
60 |
61 | // module.exports.csrf = {
62 | // grantTokenViaAjax: true,
63 | // origin: ''
64 | // }
65 |
--------------------------------------------------------------------------------
/edX-datascrub/src/buildAllStudents.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | '''
4 | This is a file for building a complete list of users who have taken a course
5 | and what they've taken. As such, it's going to be saved as a JSON file.
6 |
7 | This is meant to be run from within the repository. Clone the repo into the
8 | /Harvard directory on the shared space, then cd to this local directory and
9 | execute the script. As you can see below, some assumptions are made about the
10 | relative location of things, so it's important to either do as I have outlined
11 | or update the logic below.
12 |
13 | @author EJ Bensing
14 | '''
15 |
16 | import glob
17 | import buildCompRoster as bcr
18 | import csv
19 | import json
20 |
21 | # yah, this isn't the most hacky thing ever... like this is really bad
22 | dirsToExclude = ['Logs', 'LOGS-FROM-2012', 'Unknown', 'idtolocation.csv',
23 | 'rosters.tar', 'Course_Axes', 'ebensing-scripts', 'HarvardX-Tools',
24 | 'globalis2name.csv', 'globalname2id.csv']
25 |
26 | # relative path to the course data top level folder
27 | relPath='../../../../'
28 |
29 | # location of the id=>location file, from the relPath directory
30 | idLocFile='idtolocation.csv'
31 |
32 | def main(saveName='StudentCourses.json'):
33 | # since we're changing the value of the global variable, we need to declare
34 | # it. I guess python != javascript
35 | global idLocFile
36 | global dirsToExclude
37 | dirsToExclude.append(saveName)
38 | rdirsToExclude = [relPath + x for x in dirsToExclude]
39 | idLocFile = relPath + idLocFile
40 | # get the dirs we're going to search
41 | courseDirs = [x + '/' for x in glob.glob(relPath + '*') if x not in rdirsToExclude]
42 |
43 | # build all of the class roster CSV files
44 | for courseDir in courseDirs:
45 | print "Processing " + courseDir
46 | bcr.main(idLocFile, courseDir)
47 |
48 | rosterFiles = [x + 'FullRoster.csv' for x in courseDirs]
49 | fullUsers = {}
50 | # iterate over all of the full class rosters and create the new dict that
51 | # we'll write out
52 | for rfile in rosterFiles:
53 | with open(rfile, 'r') as csvfile:
54 | reader = csv.DictReader(csvfile)
55 | for row in reader:
56 |
57 | id = row['Student ID']
58 | if id not in fullUsers:
59 | fullUsers[id] = {}
60 | fullUsers[id]["courses"] = []
61 | for key,val in row.iteritems():
62 | # grab all the demographic information except the
63 | # student ID since that is the key to the dictionary
64 | # anyway
65 | if key != 'Student ID':
66 | fullUsers[id][key] = val
67 |
68 | # hey, what's the worst way we could get the course name? I
69 | # think this is coming close...
70 | fullUsers[id]["courses"].append(rfile.replace(relPath,
71 | "").replace("FullRoster.csv","").replace("/",""))
72 |
73 | with open(relPath + saveName, 'w') as wfile:
74 | wfile.write(json.dumps(fullUsers))
75 |
76 |
77 |
78 |
79 |
80 | if __name__ == '__main__':
81 | main()
82 |
--------------------------------------------------------------------------------
/tasks/README.md:
--------------------------------------------------------------------------------
1 | # About the `tasks` folder
2 |
3 | The `tasks` directory is a suite of Grunt tasks and their configurations, bundled for your convenience. The Grunt integration is mainly useful for bundling front-end assets, (like stylesheets, scripts, & markup templates) but it can also be used to run all kinds of development tasks, from browserify compilation to database migrations.
4 |
5 | If you haven't used [Grunt](http://gruntjs.com/) before, be sure to check out the [Getting Started](http://gruntjs.com/getting-started) guide, as it explains how to create a [Gruntfile](http://gruntjs.com/sample-gruntfile) as well as install and use Grunt plugins. Once you're familiar with that process, read on!
6 |
7 |
8 | ### How does this work?
9 |
10 | The asset pipeline bundled in Sails is a set of Grunt tasks configured with conventional defaults designed to make your project more consistent and productive.
11 |
12 | The entire front-end asset workflow in Sails is completely customizable-- while it provides some suggestions out of the box, Sails makes no pretense that it can anticipate all of the needs you'll encounter building the browser-based/front-end portion of your application. Who's to say you're even building an app for a browser?
13 |
14 |
15 |
16 | ### What tasks does Sails run automatically?
17 |
18 | Sails runs some of these tasks (the ones in the `tasks/register` folder) automatically when you run certain commands.
19 |
20 | ###### `sails lift`
21 |
22 | Runs the `default` task (`tasks/register/default.js`).
23 |
24 | ###### `sails lift --prod`
25 |
26 | Runs the `prod` task (`tasks/register/prod.js`).
27 |
28 | ###### `sails www`
29 |
30 | Runs the `build` task (`tasks/register/build.js`).
31 |
32 | ###### `sails www --prod` (production)
33 |
34 | Runs the `buildProd` task (`tasks/register/buildProd.js`).
35 |
36 |
37 | ### Can I customize this for SASS, Angular, client-side Jade templates, etc?
38 |
39 | You can modify, omit, or replace any of these Grunt tasks to fit your requirements. You can also add your own Grunt tasks- just add a `someTask.js` file in the `grunt/config` directory to configure the new task, then register it with the appropriate parent task(s) (see files in `grunt/register/*.js`).
40 |
41 |
42 | ### Do I have to use Grunt?
43 |
44 | Nope! To disable Grunt integration in Sails, just delete your Gruntfile or disable the Grunt hook.
45 |
46 |
47 | ### What if I'm not building a web frontend?
48 |
That's ok! A core tenet of Sails is client-agnosticism-- it's especially designed for building APIs used by all sorts of clients; native Android/iOS/Cordova, serverside SDKs, etc.
50 |
51 | You can completely disable Grunt by following the instructions above.
52 |
53 | If you still want to use Grunt for other purposes, but don't want any of the default web front-end stuff, just delete your project's `assets` folder and remove the front-end oriented tasks from the `grunt/register` and `grunt/config` folders. You can also run `sails new myCoolApi --no-frontend` to omit the `assets` folder and front-end-oriented Grunt tasks for future projects. You can also replace your `sails-generate-frontend` module with alternative community generators, or create your own. This allows `sails new` to create the boilerplate for native iOS apps, Android apps, Cordova apps, SteroidsJS apps, etc.
54 |
55 |
--------------------------------------------------------------------------------
/edX-datascrub/src/convertfiles/killListedFiles.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | A script made necessary by the inability to distinguish between experimental
4 | or test courses and the real thing on the edX platform.
5 |
6 | This script will get rid of a lot of files that are generated by the data
7 | extraction programs used by edX. The strings that are used to match files
8 | that can be removed are obtained by experience, and have to be updated
9 | every now and then as new experiments are tried.
10 |
11 | It might be better to use the course list generated by the log processing
12 | scripts as the set of classes to keep, and then delete any file associated with
13 | a class that hasn't had any log data generated for it during the week. But for
14 | the moment this is the technique being used.
15 |
16 | @author: waldo
17 | '''
18 |
19 | import os
20 | import glob
21 |
22 | killList = ['00001',
23 | '001-Intro_to_Learning_Management_Eco_System',
24 | '007Horsey',
25 | '100',
26 | '101-My',
27 | '101-Sept',
28 | '12345',
29 | '1795',
30 | '50000',
31 | '50-Take_2_on_edX',
32 | 'AIU12x',
33 | 'AV101',
34 | 'Biblical_Literacy',
35 | 'CB22x*edge',
36 | 'colin',
37 | 'Colin',
38 | 'Demo',
39 | 'Dogs',
40 | 'DOGS',
41 | 'Dummy',
42 | 'Edelman102',
43 | 'FAS2.1x',
44 | 'Fly_Fishing_',
45 | 'Gov2001',
46 | 'GSE102x',
47 | 'HarvardX-101',
48 | 'HeroesX-HeroesX',
49 | 'HKS-211',
50 | 'HKS211.1x-Central',
51 | 'HLS1x*edge',
52 | 'HS121x-Fall*edge',
53 | 'HSD1544.1x-3T',
54 | 'HX101',
55 | 'ITCx-ITCx',
56 | 'JandR',
57 | 'JS101',
58 | 'JS50',
59 | 'KMH1-Kuriyama_Prototype',
60 | 'Law-LRW_2',
61 | 'SLW1-Legal_Research_for_Non-Lawyers',
62 | 'MCB63',
63 | 'Math101',
64 | 'Mockup',
65 | 'NA_001',
66 | 'NA001',
67 | 'PC10',
68 | 'PH207x-*edge',
69 | 'PH207x-Health',
70 | 'QUANTUM',
71 | 'SAI-HGHI',
72 | 'SHAKE1x',
73 | 'sheep',
74 | 'Sheep',
75 | 'Slow_Cooking_Basics',
76 | 'SP001',
77 | 'SPU17x-3T2013',
78 | 'SPU27X',
79 | 'SW-12X',
80 | 'SW12-ChinaX',
81 | 'SW12x-2013_Oct',
82 | 'SW12.4X',
83 | 'T5532',
84 | 'TBD',
85 | 'test',
86 | 'Test',
87 | 'TR101',
88 | 'Tropicana',
89 | 'TT01x',
90 | 'UH001',
91 | 'WW-TFU1',
92 | 'wiki',
93 | 'WLX',
94 | 'WP1',
95 | 'xxx'
96 | ]
97 |
98 | def killFiles(fileList):
99 | for f in fileList:
100 | os.remove(f)
101 |
102 | if __name__ == '__main__':
103 | for k in killList:
104 | l = glob.glob('*'+k+'*')
105 | killFiles(l)
106 |
--------------------------------------------------------------------------------
/edX-datascrub/src/logs/checkDates.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | ''' Check that
4 | 1) start date is '-' if that course has already been processed.
5 | 2) end date is after the end date of the previous run.
6 |
7 | This script is useful if processSmallLogData.sh is executed manually.
8 |
@author: mangpo
10 | '''
11 |
12 |
13 | import os, sys
14 |
15 | def get_class_list():
16 | if os.path.isfile('ClassList.csv'):
17 | f = open('ClassList.csv','r')
18 | courses = {}
19 | for line in f:
20 | tokens = line.split(',')
21 | if len(tokens) == 3:
22 | (course,start,end) = tokens
23 | courses[course] = (start,end)
24 | f.close()
25 | return courses
26 | else:
27 | return {}
28 |
29 | def write_class_list(courses):
30 | f = open('NewClassList.csv','w')
31 | for course in courses:
32 | (start,end) = courses[course]
33 | f.write(course + "," + start + "," + end + "\n")
34 | f.close()
35 |
36 | def same_or_before(start,date):
37 | if start[0] > date[0]:
38 | return False
39 | elif start[0] < date[0]:
40 | return True
41 | elif start[1] > date[1]:
42 | return False
43 | elif start[1] < date[1]:
44 | return True
45 | else:
46 | return start[2] <= date[2]
47 |
48 | if __name__ == '__main__':
49 | names = sys.argv[1].split(',')
50 | startDate = sys.argv[2]
51 | endDate = sys.argv[3]
52 |
53 | if endDate == "-":
54 | print "Abort:", cl, "illegal end date."
55 | exit(1)
56 |
57 | class_list = get_class_list()
58 | dates = {}
59 | newStartDate = startDate
60 | newEndDate = endDate
61 | for cl in names:
62 | if cl in class_list:
63 | (oldStartDate,oldEndDate) = class_list[cl]
64 | if oldEndDate >= endDate:
65 | print "Abort:", cl, "logs have already been processed until", oldEndDate
66 | exit(1)
67 | elif not(startDate == "-"):
68 | print "Abort:", cl,"logs have already been processed."
69 | print "Use \'-\' for \'start date\' to continue processing from the latest date."
70 | exit(1)
71 |
72 | class_list[cl] = (oldStartDate, endDate)
73 | (yy,mm,dd) = oldEndDate.split('-')
74 | newStartDate = "%s-%s-%02d" % (yy,mm,int(dd)+1)
75 | dates[cl] = (newStartDate, newEndDate)
76 | else:
77 | if startDate == "-":
78 | print "Abort:", cl, "logs have NOT been processed before. Start date cannot be \'-\'"
79 | exit(1)
80 |
81 | class_list[cl] = (startDate, endDate)
82 | dates[cl] = (startDate,endDate)
83 |
84 | for cl in dates:
85 | (start,end) = dates[cl]
86 | if not(start == newStartDate):
87 | print "Cannot process this group of courses together because of the dates."
88 | print dates
89 | exit(1)
90 |
91 | print class_list
92 | write_class_list(class_list)
93 | f = open("dates.txt","w")
94 | f.write(newStartDate + "\n")
95 | f.write(newEndDate + "\n")
96 | f.close()
97 |
--------------------------------------------------------------------------------
/documentation/installation.md:
--------------------------------------------------------------------------------
1 | [Back to README](../README.md)
2 |
3 | Installation
4 | ================
5 |
6 | ## Dependencies
7 |
8 | **Ubuntu Instructions**
9 |
10 | * Install ````git````: ````sudo apt-get install git````
11 | * Install Node.js ~0.10.25, minimum 0.10.x:
12 |
13 | ````
14 | sudo apt-get install python-software-properties
15 | sudo apt-add-repository ppa:chris-lea/node.js
16 | sudo apt-get update
17 | sudo apt-get install nodejs-legacy
18 | ````
19 |
20 | * Install npm (Node.js package manager) ~1.3.10:
21 |
22 | Use ````aptitude```` to install npm and downgrade Node.js through the prompt if conflicts occur.
23 | ````
24 | sudo apt-get install aptitude
25 | sudo aptitude install npm
26 | ````
27 |
28 | * Install MySQL server: ````sudo apt-get install mysql-server-5.6````
29 | * Install Redis (latest installation instructions [here](http://redis.io/topics/quickstart)):
30 |
31 | ```
32 | wget http://download.redis.io/redis-stable.tar.gz
33 | tar xvzf redis-stable.tar.gz
34 | cd redis-stable
35 | make
36 | make test
37 | sudo make install
38 | ```
39 |
40 | * Install Sails.js ~0.10.5, minimum 0.10.x: ````sudo npm install -g sails````
41 |
42 | ## Setup Instructions
43 | Make sure MySQL and Redis are running before launching moocRP.
44 |
45 | First, create a new folder called moocRP_base to clone this repository to:
46 | ````
47 | mkdir moocRP_base
48 | cd moocRP_base
git clone https://github.com/kk415kk/moocRP.git
50 | ````
51 |
52 | After cloning this repository, run the setup script to create the correct directory structure. Enter in the correct MySQL user and password when prompted. This will create the database as well.
53 | ````
54 | ./bin/setup/setup.sh
55 | ````
56 |
57 | Once the setup script is run, the file structure setup should be in this format:
58 | ````
59 | /moocRP_base
60 | ---- /moocRP (web application cloned from Github)
61 | -------- /api
62 | -------- /assets
63 | ------------ /scaffolds
64 | ------------ /analytics
65 | -------- ...
66 | -------- /logs
67 | -------- /bin
68 | ------------ setup.sh [setup script to create directory structure]
69 | ---- /datasets
70 | -------- /available
71 | ---------- /non_pii
72 | ---------- /pii
73 | ---------- /data_drop
74 | -------- /extracted
75 | -------- /encrypted
76 | ---- /analytics
77 | -------- /tmp
78 | -------- /archives
79 | ````
80 |
81 | Then, we need to install all npm package dependencies before launch:
82 | ````
83 | cd moocRP_base/moocRP
84 | sudo npm install
85 | ````
86 |
87 | There is also a bug where Grunt is not installed properly. To fix this:
88 | ````
89 | cd moocRP_base/moocRP/node_modules/sails
90 | sudo npm install grunt-cli
91 | ````
92 |
93 | Configuration
94 | ================
95 | See the [configuration documentation](configuration.md) to configure moocRP before launch. Most importantly, note that `local.js` must be created before launching.
96 |
97 |
98 | Launching moocRP
99 | ================
100 | To launch the application, first launch the Redis server:
101 | ````
102 | redis-server&
103 | ````
104 |
105 | Then, launch moocRP in a new command window:
106 | ````
107 | cd moocRP_base/moocRP
108 | sails lift
109 | ````
110 |
111 | Note that if you configure moocRP to use SSL, you will need to run moocRP as admin:
112 | ````
113 | sudo sails lift
114 | ````
115 |
--------------------------------------------------------------------------------