└── dashboard-ui
├── README.md
├── package-lock.json
├── package.json
├── public
├── favicon.ico
├── favicon2.ico
├── index.html
└── manifest.json
├── s3cmd-2.0.1.tar.gz
├── s3cmd-2.0.1
├── INSTALL
├── LICENSE
├── MANIFEST.in
├── NEWS
├── PKG-INFO
├── README.md
├── S3
│ ├── ACL.py
│ ├── AccessLog.py
│ ├── BidirMap.py
│ ├── CloudFront.py
│ ├── Config.py
│ ├── ConnMan.py
│ ├── Crypto.py
│ ├── Custom_httplib27.py
│ ├── Custom_httplib3x.py
│ ├── Exceptions.py
│ ├── ExitCodes.py
│ ├── FileDict.py
│ ├── FileLists.py
│ ├── HashCache.py
│ ├── MultiPart.py
│ ├── PkgInfo.py
│ ├── Progress.py
│ ├── S3.py
│ ├── S3Uri.py
│ ├── SortedDict.py
│ ├── Utils.py
│ ├── __init__.py
│ └── __pycache__
│ │ ├── PkgInfo.cpython-36.pyc
│ │ └── __init__.cpython-36.pyc
├── build
│ ├── lib
│ │ └── S3
│ │ │ ├── ACL.py
│ │ │ ├── AccessLog.py
│ │ │ ├── BidirMap.py
│ │ │ ├── CloudFront.py
│ │ │ ├── Config.py
│ │ │ ├── ConnMan.py
│ │ │ ├── Crypto.py
│ │ │ ├── Custom_httplib27.py
│ │ │ ├── Custom_httplib3x.py
│ │ │ ├── Exceptions.py
│ │ │ ├── ExitCodes.py
│ │ │ ├── FileDict.py
│ │ │ ├── FileLists.py
│ │ │ ├── HashCache.py
│ │ │ ├── MultiPart.py
│ │ │ ├── PkgInfo.py
│ │ │ ├── Progress.py
│ │ │ ├── S3.py
│ │ │ ├── S3Uri.py
│ │ │ ├── SortedDict.py
│ │ │ ├── Utils.py
│ │ │ └── __init__.py
│ └── scripts-3.6
│ │ └── s3cmd
├── dist
│ └── s3cmd-2.0.1-py3.6.egg
├── s3cmd
├── s3cmd.1
├── s3cmd.egg-info
│ ├── PKG-INFO
│ ├── SOURCES.txt
│ ├── dependency_links.txt
│ ├── requires.txt
│ └── top_level.txt
├── setup.cfg
└── setup.py
└── src
├── actions
├── AuthActions.js
├── CoordinatorActions.js
├── DashboardActions.js
├── RepoDataActions.js
├── RepoLogsActions.js
└── initializeActions.js
├── components
├── app.js
├── common
│ ├── app.css
│ ├── header.css
│ ├── header.js
│ ├── original-logo.svg
│ ├── white-logo.png
│ └── white-logo2.png
├── dashboard.js
├── dashboard
│ └── repoList.js
├── homePage.js
├── newRepo.js
├── notFoundPage.js
├── repo.js
├── repo
│ ├── repoLogs.js
│ ├── repoModels.js
│ └── repoStatus.js
├── signin.js
├── signin
│ ├── common.css
│ ├── loginForm.js
│ └── registrationForm.js
├── signout.js
└── signup.js
├── constants
├── actionTypes.js
└── endpoints.js
├── dispatcher
└── appDispatcher.js
├── index.js
├── routes.js
├── stores
├── AuthStore.js
├── CoordinatorStore.js
├── DashboardStore.js
├── RepoDataStore.js
└── RepoLogsStore.js
└── utils
├── AuthRoute.js
├── PrivateRoute.js
└── registerServiceWorker.js
/dashboard-ui/README.md:
--------------------------------------------------------------------------------
1 | # Dashboard UI
2 |
3 | UI for creating and managing DataAgora repos.
4 |
5 | ## Dependencies
6 |
7 | Before anything, install the npm dependencies:
8 |
9 | ```
10 | npm install
11 | ```
12 |
13 | ## Running locally
14 |
15 | To run locally, just run
16 |
17 | ```
18 | npm run build
19 | ```
20 |
21 | then
22 |
23 | ```
24 | npm run start
25 | ```
26 |
27 | ## Deploying
28 |
29 | First do
30 |
31 | ```
32 | pip install s3cmd
33 | ```
34 |
35 | and make sure your AWS credentials are configured on your local machine (s3cmd reads them from `~/.s3cfg`; run `s3cmd --configure` to set them up).
36 |
37 | Then run
38 |
39 | ```
40 | npm run build-and-deploy
41 | ```
42 |
43 | Read `package.json` for available commands.
44 |
--------------------------------------------------------------------------------
/dashboard-ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "beta-enterprise",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@fortawesome/fontawesome-svg-core": "^1.2.17",
7 | "@fortawesome/free-regular-svg-icons": "^5.8.1",
8 | "@fortawesome/free-solid-svg-icons": "^5.8.1",
9 | "@fortawesome/react-fontawesome": "^0.1.4",
10 | "axios": "^0.18.0",
11 | "bootstrap": "^4.3.1",
12 | "flux": "^3.1.3",
13 | "i": "^0.3.6",
14 | "jquery": "^3.4.1",
15 | "keymirror": "^0.1.1",
16 | "npm": "^6.11.3",
17 | "popper.js": "^1.14.0",
18 | "react": "^16.2.0",
19 | "react-bootstrap": "^0.32.4",
20 | "react-cookies": "^0.1.0",
21 | "react-dom": "^16.2.0",
22 | "react-mixin-decorator": "^1.2.3",
23 | "react-router": "^4.2.0",
24 | "react-router-dom": "^4.2.2",
25 | "react-scripts": "^3.1.2",
26 | "reflux": "^6.4.1"
27 | },
28 | "scripts": {
29 | "start": "react-scripts start",
30 | "build": "react-scripts build",
31 | "test": "react-scripts test --env=jsdom",
32 | "eject": "react-scripts eject",
33 | "deploy": "date > build/v.txt && s3cmd sync build/* s3://beta.dataagora.com && echo '🚀 Deployed!'",
34 | "build-and-deploy": "npm run build && npm run deploy"
35 | },
36 | "browserslist": {
37 | "production": [
38 | ">0.2%",
39 | "not dead",
40 | "not op_mini all"
41 | ],
42 | "development": [
43 | "last 1 chrome version",
44 | "last 1 firefox version",
45 | "last 1 safari version"
46 | ]
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/dashboard-ui/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/public/favicon.ico
--------------------------------------------------------------------------------
/dashboard-ui/public/favicon2.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/public/favicon2.ico
--------------------------------------------------------------------------------
/dashboard-ui/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
11 |
12 |
13 |
22 | DataAgora BETA
23 |
24 |
25 |
28 |
29 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/dashboard-ui/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | }
10 | ],
11 | "start_url": "./index.html",
12 | "display": "standalone",
13 | "theme_color": "#000000",
14 | "background_color": "#ffffff"
15 | }
16 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/s3cmd-2.0.1.tar.gz
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/INSTALL:
--------------------------------------------------------------------------------
1 | Installation of s3cmd package
2 | =============================
3 |
4 | Copyright:
5 | TGRMN Software and contributors
6 |
7 | S3tools / S3cmd project homepage:
8 | http://s3tools.org
9 |
10 | !!!
11 | !!! Please consult README file for setup, usage and examples!
12 | !!!
13 |
14 | Package formats
15 | ---------------
16 | S3cmd is distributed in two formats:
17 |
18 | 1) Prebuilt RPM file - should work on most RPM-based
19 | distributions
20 |
21 | 2) Source .tar.gz package
22 |
23 |
24 | Installation of RPM package
25 | ---------------------------
26 | As user "root" run:
27 |
28 | rpm -ivh s3cmd-X.Y.Z.noarch.rpm
29 |
30 | where X.Y.Z is the most recent s3cmd release version.
31 |
32 | You may be informed about missing dependencies
33 | on Python or some libraries. Please consult your
34 | distribution documentation on ways to solve the problem.
35 |
36 | Installation from PyPI (the Python Package Index)
37 | -------------------------------------------------
38 | S3cmd can be installed from PyPI using pip (the recommended tool for installing Python packages).
39 |
40 | 1) Confirm you have PIP installed. PIP home page is here: https://pypi.python.org/pypi/pip
41 | Example install on a RHEL yum based machine
42 | sudo yum install python-pip
43 | 2) Install with pip
44 | sudo pip install s3cmd
45 |
46 | Installation from zip file
47 | --------------------------
48 | There are three options to run s3cmd from the source tarball (or zip file):
49 |
50 | 1) The S3cmd program, as distributed in s3cmd-X.Y.Z.tar.gz
51 | on SourceForge or in master.zip on GitHub, can be run directly
52 | from where you unzipped the package.
53 |
54 | 2) Or you may want to move "s3cmd" file and "S3" subdirectory
55 | to some other path. Make sure that "S3" subdirectory ends up
56 | in the same place where you move the "s3cmd" file.
57 |
58 | For instance if you decide to move s3cmd to your $HOME/bin
59 | you will have $HOME/bin/s3cmd file and $HOME/bin/S3 directory
60 | with a number of support files.
61 |
62 | 3) The cleanest and most recommended approach is to unzip the
63 | package and then just run:
64 |
65 | python setup.py install
66 |
67 | You will however need Python "distutils" module for this to
68 | work. It is often part of the core python package (e.g. in
69 | OpenSuse Python 2.5 package) or it can be installed using your
70 | package manager, e.g. in Debian use
71 |
72 | apt-get install python-setuptools
73 |
74 | Again, consult your distribution documentation on how to
75 | find out the actual package name and how to install it then.
76 |
77 | Note that on Linux, if you are not "root" already, you may
78 | need to run:
79 |
80 | sudo python setup.py install
81 |
82 | instead.
83 |
84 |
85 | Note to distribution package maintainers
86 | ----------------------------------------
87 | Define shell environment variable S3CMD_PACKAGING=yes if you
88 | don't want setup.py to install manpages and doc files. You'll
89 | have to install them manually in your .spec or similar package
90 | build scripts.
91 |
92 | On the other hand if you want setup.py to install manpages
93 | and docs, but to other than default path, define env
94 | variables $S3CMD_INSTPATH_MAN and $S3CMD_INSTPATH_DOC. Check
95 | out setup.py for details and default values.
96 |
97 |
98 | Where to get help
99 | -----------------
100 | If in doubt, or if something doesn't work as expected,
101 | get back to us via mailing list:
102 |
103 | s3tools-general@lists.sourceforge.net
104 |
105 | or visit the S3cmd / S3tools homepage at:
106 |
107 | http://s3tools.org
108 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include INSTALL README.md LICENSE NEWS
2 | include s3cmd.1
3 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 1.1
2 | Name: s3cmd
3 | Version: 2.0.1
4 | Summary: Command line tool for managing Amazon S3 and CloudFront services
5 | Home-page: http://s3tools.org
6 | Author: github.com/mdomsch, github.com/matteobar, github.com/fviard
7 | Author-email: s3tools-bugs@lists.sourceforge.net
8 | License: GNU GPL v2+
9 | Description-Content-Type: UNKNOWN
10 | Description:
11 |
12 | S3cmd lets you copy files from/to Amazon S3
13 | (Simple Storage Service) using a simple to use
14 | command line client. Supports rsync-like backup,
15 | GPG encryption, and more. Also supports management
16 | of Amazon's CloudFront content delivery network.
17 |
18 |
19 | Authors:
20 | --------
21 | Michal Ludvig
22 |
23 | Platform: UNKNOWN
24 | Classifier: Development Status :: 5 - Production/Stable
25 | Classifier: Environment :: Console
26 | Classifier: Environment :: MacOS X
27 | Classifier: Environment :: Win32 (MS Windows)
28 | Classifier: Intended Audience :: End Users/Desktop
29 | Classifier: Intended Audience :: System Administrators
30 | Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
31 | Classifier: Natural Language :: English
32 | Classifier: Operating System :: MacOS :: MacOS X
33 | Classifier: Operating System :: Microsoft :: Windows
34 | Classifier: Operating System :: POSIX
35 | Classifier: Operating System :: Unix
36 | Classifier: Programming Language :: Python :: 2
37 | Classifier: Programming Language :: Python :: 2.6
38 | Classifier: Programming Language :: Python :: 2.7
39 | Classifier: Programming Language :: Python :: 3
40 | Classifier: Programming Language :: Python :: 3.3
41 | Classifier: Programming Language :: Python :: 3.4
42 | Classifier: Programming Language :: Python :: 3.5
43 | Classifier: Programming Language :: Python :: 3.6
44 | Classifier: Topic :: System :: Archiving
45 | Classifier: Topic :: Utilities
46 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/ACL.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 - Access Control List representation
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | import sys
12 | from .Utils import getTreeFromXml, deunicodise, encode_to_s3, decode_from_s3
13 |
14 | try:
15 | import xml.etree.ElementTree as ET
16 | except ImportError:
17 | import elementtree.ElementTree as ET
18 |
19 | PY3 = (sys.version_info >= (3,0))
20 |
21 | class Grantee(object):
22 | ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
23 | LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"
24 |
25 | def __init__(self):
26 | self.xsi_type = None
27 | self.tag = None
28 | self.name = None
29 | self.display_name = None
30 | self.permission = None
31 |
32 | def __repr__(self):
33 | return repr('Grantee("%(tag)s", "%(name)s", "%(permission)s")' % {
34 | "tag" : self.tag,
35 | "name" : self.name,
36 | "permission" : self.permission
37 | })
38 |
39 | def isAllUsers(self):
40 | return self.tag == "URI" and self.name == Grantee.ALL_USERS_URI
41 |
42 | def isAnonRead(self):
43 | return self.isAllUsers() and (self.permission == "READ" or self.permission == "FULL_CONTROL")
44 |
45 | def getElement(self):
46 | el = ET.Element("Grant")
47 | grantee = ET.SubElement(el, "Grantee", {
48 | 'xmlns:xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
49 | 'xsi:type' : self.xsi_type
50 | })
51 | name = ET.SubElement(grantee, self.tag)
52 | name.text = self.name
53 | permission = ET.SubElement(el, "Permission")
54 | permission.text = self.permission
55 | return el
56 |
57 | class GranteeAnonRead(Grantee):
58 | def __init__(self):
59 | Grantee.__init__(self)
60 | self.xsi_type = "Group"
61 | self.tag = "URI"
62 | self.name = Grantee.ALL_USERS_URI
63 | self.permission = "READ"
64 |
65 | class GranteeLogDelivery(Grantee):
66 | def __init__(self, permission):
67 | """
68 | permission must be either READ_ACP or WRITE
69 | """
70 | Grantee.__init__(self)
71 | self.xsi_type = "Group"
72 | self.tag = "URI"
73 | self.name = Grantee.LOG_DELIVERY_URI
74 | self.permission = permission
75 |
76 | class ACL(object):
77 | EMPTY_ACL = b""
78 |
79 | def __init__(self, xml = None):
80 | if not xml:
81 | xml = ACL.EMPTY_ACL
82 |
83 | self.grantees = []
84 | self.owner_id = ""
85 | self.owner_nick = ""
86 |
87 | tree = getTreeFromXml(encode_to_s3(xml))
88 | self.parseOwner(tree)
89 | self.parseGrants(tree)
90 |
91 | def parseOwner(self, tree):
92 | self.owner_id = tree.findtext(".//Owner//ID")
93 | self.owner_nick = tree.findtext(".//Owner//DisplayName")
94 |
95 | def parseGrants(self, tree):
96 | for grant in tree.findall(".//Grant"):
97 | grantee = Grantee()
98 | g = grant.find(".//Grantee")
99 | grantee.xsi_type = g.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
100 | grantee.permission = grant.find('Permission').text
101 | for el in g:
102 | if el.tag == "DisplayName":
103 | grantee.display_name = el.text
104 | else:
105 | grantee.tag = el.tag
106 | grantee.name = el.text
107 | self.grantees.append(grantee)
108 |
109 | def getGrantList(self):
110 | acl = []
111 | for grantee in self.grantees:
112 | if grantee.display_name:
113 | user = grantee.display_name
114 | elif grantee.isAllUsers():
115 | user = "*anon*"
116 | else:
117 | user = grantee.name
118 | acl.append({'grantee': user, 'permission': grantee.permission})
119 | return acl
120 |
121 | def getOwner(self):
122 | return { 'id' : self.owner_id, 'nick' : self.owner_nick }
123 |
124 | def isAnonRead(self):
125 | for grantee in self.grantees:
126 | if grantee.isAnonRead():
127 | return True
128 | return False
129 |
130 | def grantAnonRead(self):
131 | if not self.isAnonRead():
132 | self.appendGrantee(GranteeAnonRead())
133 |
134 | def revokeAnonRead(self):
135 | self.grantees = [g for g in self.grantees if not g.isAnonRead()]
136 |
137 | def appendGrantee(self, grantee):
138 | self.grantees.append(grantee)
139 |
140 | def hasGrant(self, name, permission):
141 | name = name.lower()
142 | permission = permission.upper()
143 |
144 | for grantee in self.grantees:
145 | if grantee.name.lower() == name:
146 | if grantee.permission == "FULL_CONTROL":
147 | return True
148 | elif grantee.permission.upper() == permission:
149 | return True
150 |
151 | return False;
152 |
153 | def grant(self, name, permission):
154 | if self.hasGrant(name, permission):
155 | return
156 |
157 | permission = permission.upper()
158 |
159 | if "ALL" == permission:
160 | permission = "FULL_CONTROL"
161 |
162 | if "FULL_CONTROL" == permission:
163 | self.revoke(name, "ALL")
164 |
165 | grantee = Grantee()
166 | grantee.name = name
167 | grantee.permission = permission
168 |
169 | if '@' in name:
170 | grantee.name = grantee.name.lower()
171 | grantee.xsi_type = "AmazonCustomerByEmail"
172 | grantee.tag = "EmailAddress"
173 | elif 'http://acs.amazonaws.com/groups/' in name:
174 | grantee.xsi_type = "Group"
175 | grantee.tag = "URI"
176 | else:
177 | grantee.name = grantee.name.lower()
178 | grantee.xsi_type = "CanonicalUser"
179 | grantee.tag = "ID"
180 |
181 | self.appendGrantee(grantee)
182 |
183 |
184 | def revoke(self, name, permission):
185 | name = name.lower()
186 | permission = permission.upper()
187 |
188 | if "ALL" == permission:
189 | self.grantees = [g for g in self.grantees if not (g.name.lower() == name or g.display_name.lower() == name)]
190 | else:
191 | self.grantees = [g for g in self.grantees if not ((g.display_name.lower() == name and g.permission.upper() == permission)\
192 | or (g.name.lower() == name and g.permission.upper() == permission))]
193 |
194 | def get_printable_tree(self):
195 | tree = getTreeFromXml(ACL.EMPTY_ACL)
196 | tree.attrib['xmlns'] = "http://s3.amazonaws.com/doc/2006-03-01/"
197 | owner = tree.find(".//Owner//ID")
198 | owner.text = self.owner_id
199 | acl = tree.find(".//AccessControlList")
200 | for grantee in self.grantees:
201 | acl.append(grantee.getElement())
202 | return tree
203 |
204 | def __unicode__(self):
205 | return decode_from_s3(ET.tostring(self.get_printable_tree()))
206 |
207 | def __str__(self):
208 | if PY3:
209 | # Return unicode
210 | return ET.tostring(self.get_printable_tree(), encoding="unicode")
211 | else:
212 | # Return bytes
213 | return ET.tostring(self.get_printable_tree())
214 |
215 | if __name__ == "__main__":
216 | xml = b"""
217 |
218 |
219 | 12345678901234567890
220 | owner-nickname
221 |
222 |
223 |
224 |
225 | 12345678901234567890
226 | owner-nickname
227 |
228 | FULL_CONTROL
229 |
230 |
231 |
232 | http://acs.amazonaws.com/groups/global/AllUsers
233 |
234 | READ
235 |
236 |
237 |
238 | """
239 | acl = ACL(xml)
240 | print("Grants:", acl.getGrantList())
241 | acl.revokeAnonRead()
242 | print("Grants:", acl.getGrantList())
243 | acl.grantAnonRead()
244 | print("Grants:", acl.getGrantList())
245 | print(acl)
246 |
247 | # vim:et:ts=4:sts=4:ai
248 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/AccessLog.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 - Access Control List representation
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | import sys
12 |
13 | from . import S3Uri
14 | from .Exceptions import ParameterError
15 | from .Utils import getTreeFromXml, decode_from_s3
16 | from .ACL import GranteeAnonRead
17 |
18 | try:
19 | import xml.etree.ElementTree as ET
20 | except ImportError:
21 | import elementtree.ElementTree as ET
22 |
23 | PY3 = (sys.version_info >= (3,0))
24 |
25 | __all__ = []
26 | class AccessLog(object):
27 | LOG_DISABLED = ""
28 | LOG_TEMPLATE = ""
29 |
30 | def __init__(self, xml = None):
31 | if not xml:
32 | xml = self.LOG_DISABLED
33 | self.tree = getTreeFromXml(xml)
34 | self.tree.attrib['xmlns'] = "http://doc.s3.amazonaws.com/2006-03-01"
35 |
36 | def isLoggingEnabled(self):
37 | return (self.tree.find(".//LoggingEnabled") is not None)
38 |
39 | def disableLogging(self):
40 | el = self.tree.find(".//LoggingEnabled")
41 | if el:
42 | self.tree.remove(el)
43 |
44 | def enableLogging(self, target_prefix_uri):
45 | el = self.tree.find(".//LoggingEnabled")
46 | if not el:
47 | el = getTreeFromXml(self.LOG_TEMPLATE)
48 | self.tree.append(el)
49 | el.find(".//TargetBucket").text = target_prefix_uri.bucket()
50 | el.find(".//TargetPrefix").text = target_prefix_uri.object()
51 |
52 | def targetPrefix(self):
53 | if self.isLoggingEnabled():
54 | target_prefix = u"s3://%s/%s" % (
55 | self.tree.find(".//LoggingEnabled//TargetBucket").text,
56 | self.tree.find(".//LoggingEnabled//TargetPrefix").text)
57 | return S3Uri.S3Uri(target_prefix)
58 | else:
59 | return ""
60 |
61 | def setAclPublic(self, acl_public):
62 | le = self.tree.find(".//LoggingEnabled")
63 | if le is None:
64 | raise ParameterError("Logging not enabled, can't set default ACL for logs")
65 | tg = le.find(".//TargetGrants")
66 | if not acl_public:
67 | if not tg:
68 | ## All good, it's not been there
69 | return
70 | else:
71 | le.remove(tg)
72 | else: # acl_public == True
73 | anon_read = GranteeAnonRead().getElement()
74 | if not tg:
75 | tg = ET.SubElement(le, "TargetGrants")
76 | ## What if TargetGrants already exists? We should check if
77 | ## AnonRead is there before appending a new one. Later...
78 | tg.append(anon_read)
79 |
80 | def isAclPublic(self):
81 | raise NotImplementedError()
82 |
83 | def __unicode__(self):
84 | return decode_from_s3(ET.tostring(self.tree))
85 |
86 | def __str__(self):
87 | if PY3:
88 | # Return unicode
89 | return ET.tostring(self.tree, encoding="unicode")
90 | else:
91 | # Return bytes
92 | return ET.tostring(self.tree)
93 |
94 | __all__.append("AccessLog")
95 |
96 | if __name__ == "__main__":
97 | log = AccessLog()
98 | print(log)
99 | log.enableLogging(S3Uri.S3Uri(u"s3://targetbucket/prefix/log-"))
100 | print(log)
101 | log.setAclPublic(True)
102 | print(log)
103 | log.setAclPublic(False)
104 | print(log)
105 | log.disableLogging()
106 | print(log)
107 |
108 | # vim:et:ts=4:sts=4:ai
109 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/BidirMap.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | class BidirMap(object):
10 | def __init__(self, **map):
11 | self.k2v = {}
12 | self.v2k = {}
13 | for key in map:
14 | self.__setitem__(key, map[key])
15 |
16 | def __setitem__(self, key, value):
17 | if value in self.v2k:
18 | if self.v2k[value] != key:
19 | raise KeyError("Value '"+str(value)+"' already in use with key '"+str(self.v2k[value])+"'")
20 | try:
21 | del(self.v2k[self.k2v[key]])
22 | except KeyError:
23 | pass
24 | self.k2v[key] = value
25 | self.v2k[value] = key
26 |
27 | def __getitem__(self, key):
28 | return self.k2v[key]
29 |
30 | def __str__(self):
31 | return self.v2k.__str__()
32 |
33 | def getkey(self, value):
34 | return self.v2k[value]
35 |
36 | def getvalue(self, key):
37 | return self.k2v[key]
38 |
39 | def keys(self):
40 | return [key for key in self.k2v]
41 |
42 | def values(self):
43 | return [value for value in self.v2k]
44 |
45 | # vim:et:ts=4:sts=4:ai
46 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/Custom_httplib27.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import httplib
5 |
6 | from httplib import (_CS_REQ_SENT, _CS_REQ_STARTED, CONTINUE, UnknownProtocol,
7 | CannotSendHeader, NO_CONTENT, NOT_MODIFIED, EXPECTATION_FAILED,
8 | HTTPMessage, HTTPException)
9 |
10 | try:
11 | from cStringIO import StringIO
12 | except ImportError:
13 | from StringIO import StringIO
14 |
15 | from .Utils import encode_to_s3
16 |
17 |
18 | _METHODS_EXPECTING_BODY = ['PATCH', 'POST', 'PUT']
19 |
20 | # Fixed python 2.X httplib to be able to support
21 | # Expect: 100-Continue http feature
22 | # Inspired by:
23 | # http://bugs.python.org/file26357/issue1346874-273.patch
24 |
25 | def httpresponse_patched_begin(self):
26 | """ Re-implemented httplib begin function
27 | to not loop over "100 CONTINUE" status replies
28 | but to report it to higher level so it can be processed.
29 | """
30 | if self.msg is not None:
31 | # we've already started reading the response
32 | return
33 |
34 | # read only one status even if we get a non-100 response
35 | version, status, reason = self._read_status()
36 |
37 | self.status = status
38 | self.reason = reason.strip()
39 | if version == 'HTTP/1.0':
40 | self.version = 10
41 | elif version.startswith('HTTP/1.'):
42 | self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
43 | elif version == 'HTTP/0.9':
44 | self.version = 9
45 | else:
46 | raise UnknownProtocol(version)
47 |
48 | if self.version == 9:
49 | self.length = None
50 | self.chunked = 0
51 | self.will_close = 1
52 | self.msg = HTTPMessage(StringIO())
53 | return
54 |
55 | self.msg = HTTPMessage(self.fp, 0)
56 | if self.debuglevel > 0:
57 | for hdr in self.msg.headers:
58 | print("header:", hdr, end=" ")
59 |
60 | # don't let the msg keep an fp
61 | self.msg.fp = None
62 |
63 | # are we using the chunked-style of transfer encoding?
64 | tr_enc = self.msg.getheader('transfer-encoding')
65 | if tr_enc and tr_enc.lower() == "chunked":
66 | self.chunked = 1
67 | self.chunk_left = None
68 | else:
69 | self.chunked = 0
70 |
71 | # will the connection close at the end of the response?
72 | self.will_close = self._check_close()
73 |
74 | # do we have a Content-Length?
75 | # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
76 | length = self.msg.getheader('content-length')
77 | if length and not self.chunked:
78 | try:
79 | self.length = int(length)
80 | except ValueError:
81 | self.length = None
82 | else:
83 | if self.length < 0: # ignore nonsensical negative lengths
84 | self.length = None
85 | else:
86 | self.length = None
87 |
88 | # does the body have a fixed length? (of zero)
89 | if (status == NO_CONTENT or status == NOT_MODIFIED or
90 | 100 <= status < 200 or # 1xx codes
91 | self._method == 'HEAD'):
92 | self.length = 0
93 |
94 | # if the connection remains open, and we aren't using chunked, and
95 | # a content-length was not provided, then assume that the connection
96 | # WILL close.
97 | if not self.will_close and \
98 | not self.chunked and \
99 | self.length is None:
100 | self.will_close = 1
101 |
102 |
103 | def httpconnection_patched_set_content_length(self, body, method):
104 | ## REIMPLEMENTED because new in last httplib but needed by send_request
105 | # Set the content-length based on the body. If the body is "empty", we
106 | # set Content-Length: 0 for methods that expect a body (RFC 7230,
107 | # Section 3.3.2). If the body is set for other methods, we set the
108 | # header provided we can figure out what the length is.
109 | thelen = None
110 | if body is None and method.upper() in _METHODS_EXPECTING_BODY:
111 | thelen = '0'
112 | elif body is not None:
113 | try:
114 | thelen = str(len(body))
115 | except (TypeError, AttributeError):
116 | # If this is a file-like object, try to
117 | # fstat its file descriptor
118 | try:
119 | thelen = str(os.fstat(body.fileno()).st_size)
120 | except (AttributeError, OSError):
121 | # Don't send a length if this failed
122 | if self.debuglevel > 0: print("Cannot stat!!")
123 |
124 | if thelen is not None:
125 | self.putheader('Content-Length', thelen)
126 |
127 | def httpconnection_patched_send_request(self, method, url, body, headers):
128 | # Honor explicitly requested Host: and Accept-Encoding: headers.
129 | header_names = dict.fromkeys([k.lower() for k in headers])
130 | skips = {}
131 | if 'host' in header_names:
132 | skips['skip_host'] = 1
133 | if 'accept-encoding' in header_names:
134 | skips['skip_accept_encoding'] = 1
135 |
136 | expect_continue = False
137 | for hdr, value in headers.iteritems():
138 | if 'expect' == hdr.lower() and '100-continue' in value.lower():
139 | expect_continue = True
140 |
141 | url = encode_to_s3(url)
142 | self.putrequest(method, url, **skips)
143 |
144 | if 'content-length' not in header_names:
145 | self._set_content_length(body, method)
146 | for hdr, value in headers.iteritems():
147 | self.putheader(encode_to_s3(hdr), encode_to_s3(value))
148 |
149 | # If an Expect: 100-continue was sent, we need to check for a 417
150 | # Expectation Failed to avoid unnecessarily sending the body
151 | # See RFC 2616 8.2.3
152 | if not expect_continue:
153 | self.endheaders(body)
154 | else:
155 | if not body:
156 | raise HTTPException("A body is required when expecting "
157 | "100-continue")
158 | self.endheaders()
159 | resp = self.getresponse()
160 | resp.read()
161 | self._HTTPConnection__state = _CS_REQ_SENT
162 | if resp.status == EXPECTATION_FAILED:
163 | raise ExpectationFailed()
164 | elif resp.status == CONTINUE:
165 | self.send(body)
166 |
167 | def httpconnection_patched_endheaders(self, message_body=None):
168 | """Indicate that the last header line has been sent to the server.
169 |
170 | This method sends the request to the server. The optional
171 | message_body argument can be used to pass a message body
172 | associated with the request. The message body will be sent in
173 | the same packet as the message headers if it is a string, otherwise it is
174 | sent as a separate packet.
175 | """
176 | if self._HTTPConnection__state == _CS_REQ_STARTED:
177 | self._HTTPConnection__state = _CS_REQ_SENT
178 | else:
179 | raise CannotSendHeader()
180 | self._send_output(message_body)
181 |
182 | # TCP Maximum Segment Size (MSS) is determined by the TCP stack on
183 | # a per-connection basis. There is no simple and efficient
184 | # platform independent mechanism for determining the MSS, so
185 | # instead a reasonable estimate is chosen. The getsockopt()
186 | # interface using the TCP_MAXSEG parameter may be a suitable
187 | # approach on some operating systems. A value of 16KiB is chosen
188 | # as a reasonable estimate of the maximum MSS.
189 | mss = 16384
190 |
def httpconnection_patched_send_output(self, message_body=None):
    """Send the currently buffered request and clear the buffer.

    Appends an extra \\r\\n to the buffer.
    A message_body may be specified, to be appended to the request.
    """
    # Blank line terminates the header block.
    self._buffer.extend((b"", b""))
    msg = b"\r\n".join(self._buffer)
    del self._buffer[:]

    msg = encode_to_s3(msg)
    # If msg and message_body are sent in a single send() call,
    # it will avoid performance problems caused by the interaction
    # between delayed ack and the Nagle algorithm.
    if isinstance(message_body, str) and len(message_body) < mss:
        # Bugfix: msg is bytes at this point, so a text body must be
        # encoded before concatenation -- on Python 3 a plain
        # `msg += message_body` raises TypeError (bytes += str).
        msg += encode_to_s3(message_body)
        message_body = None
    self.send(msg)
    if message_body is not None:
        # message_body was not a (small) string (i.e. it is a file) and
        # we must run the risk of Nagle
        self.send(message_body)
213 |
214 |
class ExpectationFailed(HTTPException):
    """Raised when the server answers an "Expect: 100-continue" handshake
    with HTTP 417 (see the EXPECTATION_FAILED check in the request path)."""
    pass
217 |
218 | # Wrappers #
219 |
def httpconnection_patched_wrapper_send_body(self, message_body):
    # Thin public wrapper exposing send() so callers can push a request
    # body through the connection explicitly.
    self.send(message_body)
222 |
223 |
# Monkey-patch httplib with the customised implementations defined above.
httplib.HTTPResponse.begin = httpresponse_patched_begin
httplib.HTTPConnection.endheaders = httpconnection_patched_endheaders
httplib.HTTPConnection._send_output = httpconnection_patched_send_output
httplib.HTTPConnection._set_content_length = httpconnection_patched_set_content_length
httplib.HTTPConnection._send_request = httpconnection_patched_send_request

# Interfaces added to httplib.HTTPConnection:
httplib.HTTPConnection.wrapper_send_body = httpconnection_patched_wrapper_send_body
232 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/Exceptions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager - Exceptions library
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import
10 |
11 | from logging import debug, error
12 | import sys
13 | import S3.Utils
14 | from . import ExitCodes
15 |
# Python 2/3 compatibility marker used by S3Exception.__str__ below.
if sys.version_info >= (3,0):
    PY3 = True
    # In python 3, unicode -> str, and str -> bytes
    unicode = str
else:
    PY3 = False


try:
    from xml.etree.ElementTree import ParseError as XmlParseError
except ImportError:
    # ParseError was only added in python2.7, before ET was raising ExpatError
    from xml.parsers.expat import ExpatError as XmlParseError
29 |
class S3Exception(Exception):
    """Base class of all s3cmd exceptions; stores a unicode message."""

    def __init__(self, message = ""):
        self.message = S3.Utils.unicodise(message)

    def __str__(self):
        # Route through __unicode__() rather than reading self.message
        # directly, so that subclass overrides of __unicode__ are honoured.
        text = self.__unicode__()
        if PY3:
            return text
        return S3.Utils.deunicodise(text)

    def __unicode__(self):
        return self.message

    # (Base)Exception.message was deprecated in Python 2.6;
    # re-expose it as an explicit property backed by _message.
    def _set_message(self, message):
        self._message = message
    def _get_message(self):
        return self._message
    message = property(_get_message, _set_message)
51 |
52 |
class S3Error (S3Exception):
    """Error reported by the S3 service, built from an HTTP response dict.

    Expects at least the "status" and "reason" keys; "headers" and
    "data" (an XML error document) are parsed when present.
    """
    def __init__(self, response):
        self.status = response["status"]
        self.reason = response["reason"]
        self.info = {
            "Code" : "",
            "Message" : "",
            "Resource" : ""
        }
        debug("S3Error: %s (%s)" % (self.status, self.reason))
        if "headers" in response:
            for header in response["headers"]:
                debug("HttpHeader: %s: %s" % (header, response["headers"][header]))
        if "data" in response and response["data"]:
            try:
                tree = S3.Utils.getTreeFromXml(response["data"])
            except XmlParseError:
                debug("Not an XML response")
            else:
                try:
                    self.info.update(self.parse_error_xml(tree))
                except Exception as e:
                    # Best effort: a malformed error document must not mask
                    # the original HTTP error.
                    error("Error parsing xml: %s. ErrorXML: %s" % (e, response["data"]))

        self.code = self.info["Code"]
        self.message = self.info["Message"]
        self.resource = self.info["Resource"]

    def __unicode__(self):
        retval = u"%d " % (self.status)
        retval += (u"(%s)" % ("Code" in self.info and self.info["Code"] or self.reason))
        error_msg = self.info.get("Message")
        if error_msg:
            retval += (u": %s" % error_msg)
        return retval

    def get_error_code(self):
        """Map the HTTP status to one of the ExitCodes.EX_* process exit codes."""
        if self.status in [301, 307]:
            return ExitCodes.EX_SERVERMOVED
        elif self.status in [400, 405, 411, 416, 417, 501, 504]:
            return ExitCodes.EX_SERVERERROR
        elif self.status == 403:
            return ExitCodes.EX_ACCESSDENIED
        elif self.status == 404:
            return ExitCodes.EX_NOTFOUND
        elif self.status == 409:
            return ExitCodes.EX_CONFLICT
        elif self.status == 412:
            return ExitCodes.EX_PRECONDITION
        elif self.status == 500:
            return ExitCodes.EX_SOFTWARE
        elif self.status == 503:
            return ExitCodes.EX_SERVICE
        else:
            return ExitCodes.EX_SOFTWARE

    @staticmethod
    def parse_error_xml(tree):
        """Extract {tag: text} from the <Error> node of an S3 error document.

        Raises S3ResponseError when no Error node can be located.
        """
        info = {}
        error_node = tree
        if not error_node.tag == "Error":
            error_node = tree.find(".//Error")
        if error_node is not None:
            # Bugfix: Element.getchildren() was deprecated since Python 3.2
            # and removed in 3.9 -- iterating the element is equivalent.
            for child in error_node:
                if child.text != "":
                    debug("ErrorXML: " + child.tag + ": " + repr(child.text))
                    info[child.tag] = child.text
        else:
            raise S3ResponseError("Malformed error XML returned from remote server.")
        return info
123 |
124 |
class CloudFrontError(S3Error):
    """S3Error subclass used for CloudFront service responses."""
    pass
127 |
class S3UploadError(S3Exception):
    """Raised when an upload to S3 fails."""
    pass
130 |
class S3DownloadError(S3Exception):
    """Raised when a download from S3 fails."""
    pass
133 |
class S3RequestError(S3Exception):
    """Raised when a request could not be built or sent."""
    pass
136 |
class S3ResponseError(S3Exception):
    """Raised for malformed or unexpected server responses
    (e.g. error XML without an Error node -- see S3Error.parse_error_xml)."""
    pass
139 |
class InvalidFileError(S3Exception):
    """Raised when a local file is unusable (unreadable, wrong type, ...)."""
    pass
142 |
class ParameterError(S3Exception):
    """Raised for invalid command-line arguments or parameter combinations."""
    pass
145 |
146 | # vim:et:ts=4:sts=4:ai
147 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/ExitCodes.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # patterned on /usr/include/sysexits.h
4 |
EX_OK              = 0
EX_GENERAL         = 1
EX_PARTIAL         = 2    # some parts of the command succeeded, while others failed
EX_SERVERMOVED     = 10   # 301: Moved permanently & 307: Moved temp
EX_SERVERERROR     = 11   # 400, 405, 411, 416, 417, 501: Bad request, 504: Gateway Time-out
EX_NOTFOUND        = 12   # 404: Not found
EX_CONFLICT        = 13   # 409: Conflict (ex: bucket error)
EX_PRECONDITION    = 14   # 412: Precondition failed
EX_SERVICE         = 15   # 503: Service not available or slow down
EX_USAGE           = 64   # The command was used incorrectly (e.g. bad command line syntax)
EX_DATAERR         = 65   # Failed file transfer, upload or download
EX_SOFTWARE        = 70   # internal software error (e.g. S3 error of unknown specificity)
EX_OSERR           = 71   # system error (e.g. out of memory)
EX_OSFILE          = 72   # OS error (e.g. invalid Python version)
EX_IOERR           = 74   # An error occurred while doing I/O on some file.
EX_TEMPFAIL        = 75   # temporary failure (S3DownloadError or similar, retry later)
EX_ACCESSDENIED    = 77   # Insufficient permissions to perform the operation on S3
EX_CONFIG          = 78   # Configuration file error
# Shell convention: processes killed by signal N exit with 128 + N.
_EX_SIGNAL         = 128
_EX_SIGINT         = 2
EX_BREAK           = _EX_SIGNAL + _EX_SIGINT # Control-C (KeyboardInterrupt raised)
26 |
class ExitScoreboard(object):
    """Tallies per-item outcomes and reduces them to one exit code."""

    def __init__(self):
        self._success = 0
        self._notfound = 0
        self._failed = 0

    def success(self):
        """Count one successfully processed item."""
        self._success += 1

    def notfound(self):
        """Count one item that was not found."""
        self._notfound += 1

    def failed(self):
        """Count one failed item."""
        self._failed += 1

    def rc(self):
        """Collapse the tallies into the most appropriate EX_* code."""
        if self._success:
            if not self._failed and not self._notfound:
                return EX_OK
            if self._failed:
                return EX_PARTIAL
            # successes mixed with not-founds fall through to the generic code
        elif self._failed:
            return EX_GENERAL
        elif self._notfound:
            return EX_NOTFOUND
        return EX_GENERAL
56 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/FileDict.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import
10 |
11 | import logging
12 | from .SortedDict import SortedDict
13 | from . import Utils
14 | from . import Config
15 |
# md5 of a zero-byte file; FileDict.record_md5 refuses to index these
# since every empty file shares it.
zero_length_md5 = "d41d8cd98f00b204e9800998ecf8427e"
# Module-wide configuration object (consulted for sync_checks in get_md5).
cfg = Config.Config()
18 |
class FileDict(SortedDict):
    """SortedDict of {relative_file: attributes} with md5 and hardlink caches."""

    def __init__(self, mapping = None, ignore_case = True, **kwargs):
        SortedDict.__init__(self, mapping = mapping or {}, ignore_case = ignore_case, **kwargs)
        # { dev: { inode: md5 } } -- note: an earlier comment claimed a nested
        # {'md5':, 'relative_files':} dict, but record_hardlink() below stores
        # the md5 string directly.
        self.hardlinks_md5 = dict()
        # { md5: relative_file } -- the first file recorded with that md5
        # (not a set, despite the original comment).
        self.by_md5 = dict()

    def record_md5(self, relative_file, md5):
        """Remember relative_file as the representative of md5.

        Skips empty names, unknown md5s and the zero-length-file md5;
        only the first file seen for a given md5 is kept.
        """
        if not relative_file:
            return
        if md5 is None:
            return
        if md5 == zero_length_md5:
            return
        if md5 not in self.by_md5:
            self.by_md5[md5] = relative_file

    def find_md5_one(self, md5):
        """Return a file previously recorded with this md5, or None."""
        if not md5:
            return None
        return self.by_md5.get(md5, None)

    def get_md5(self, relative_file):
        """returns md5 if it can, or raises IOError if file is unreadable"""
        md5 = None
        if 'md5' in self[relative_file]:
            return self[relative_file]['md5']
        # Cheap path first: a hardlinked twin may already have the md5 cached.
        md5 = self.get_hardlink_md5(relative_file)
        if md5 is None and 'md5' in cfg.sync_checks:
            logging.debug(u"doing file I/O to read md5 of %s" % relative_file)
            md5 = Utils.hash_file_md5(self[relative_file]['full_name'])
        self.record_md5(relative_file, md5)
        self[relative_file]['md5'] = md5
        return md5

    def record_hardlink(self, relative_file, dev, inode, md5, size):
        """Cache md5 for the (dev, inode) pair so hardlinked files share it.

        relative_file is accepted for symmetry but not used here.
        """
        if md5 is None:
            return
        if size == 0:
            # don't record 0-length files
            return
        if dev == 0 or inode == 0:
            # Windows reports dev/inode as 0 -- hardlink tracking impossible
            return
        if dev not in self.hardlinks_md5:
            self.hardlinks_md5[dev] = dict()
        if inode not in self.hardlinks_md5[dev]:
            self.hardlinks_md5[dev][inode] = md5

    def get_hardlink_md5(self, relative_file):
        """Return the cached md5 for this file's (dev, inode), or None."""
        try:
            dev = self[relative_file]['dev']
            inode = self[relative_file]['inode']
            md5 = self.hardlinks_md5[dev][inode]
        except KeyError:
            md5 = None
        return md5
75 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/HashCache.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 |
5 | try:
    # python 2: prefer the faster C implementation when available
    # (python 3 has no cPickle and falls back to pickle below)
7 | import cPickle as pickle
8 | except ImportError:
9 | import pickle
10 | from .Utils import deunicodise
11 |
class HashCache(object):
    """In-memory (dev, inode, mtime) -> {md5, size} cache, picklable to disk."""

    def __init__(self):
        # inodes[dev][inode][mtime] = {'md5': ..., 'size': ..., ('purge': True)}
        self.inodes = dict()

    def add(self, dev, inode, mtime, size, md5):
        """Record the md5/size of the file identified by (dev, inode, mtime)."""
        if dev == 0 or inode == 0: return # Windows
        if dev not in self.inodes:
            self.inodes[dev] = dict()
        if inode not in self.inodes[dev]:
            self.inodes[dev][inode] = dict()
        self.inodes[dev][inode][mtime] = dict(md5=md5, size=size)

    def md5(self, dev, inode, mtime, size):
        """Return the cached md5, or None when unknown or the size differs."""
        try:
            d = self.inodes[dev][inode][mtime]
        except KeyError:
            # Narrowed from a bare `except:` -- a missing key is the only
            # expected failure here; anything else should surface.
            return None
        if d['size'] != size:
            return None
        return d['md5']

    def mark_all_for_purge(self):
        """Flag every entry for purge(); unmark_for_purge() rescues live ones."""
        for d in self.inodes.keys():
            for i in self.inodes[d].keys():
                for c in self.inodes[d][i].keys():
                    self.inodes[d][i][c]['purge'] = True

    def unmark_for_purge(self, dev, inode, mtime, size):
        """Clear the purge flag for a still-existing file (size must match)."""
        try:
            d = self.inodes[dev][inode][mtime]
        except KeyError:
            return
        if d['size'] == size and 'purge' in d:
            del self.inodes[dev][inode][mtime]['purge']

    def purge(self):
        """Drop every inode that still has a purge-flagged entry."""
        # Bugfix: iterate over snapshots -- deleting from a dict while
        # iterating its live .keys() view raises RuntimeError on Python 3.
        for d in list(self.inodes.keys()):
            for i in list(self.inodes[d].keys()):
                for m in self.inodes[d][i].keys():
                    if 'purge' in self.inodes[d][i][m]:
                        del self.inodes[d][i]
                        break

    def save(self, f):
        """Pickle the cache (version-tagged) to file path f."""
        d = dict(inodes=self.inodes, version=1)
        with open(deunicodise(f), 'wb') as fp:
            pickle.dump(d, fp)

    def load(self, f):
        """Load a cache previously written by save(); ignores unknown versions."""
        with open(deunicodise(f), 'rb') as fp:
            d = pickle.load(fp)
        if d.get('version') == 1 and 'inodes' in d:
            self.inodes = d['inodes']
65 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/MultiPart.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 Multipart upload support
4 | ## Author: Jerome Leclanche
5 | ## License: GPL Version 2
6 |
7 | from __future__ import absolute_import
8 |
9 | import os
10 | import sys
11 | from stat import ST_SIZE
12 | from logging import debug, info, warning, error
13 | from .Utils import getTextFromXml, getTreeFromXml, formatSize, unicodise, deunicodise, calculateChecksum, parseNodes, encode_to_s3
14 |
class MultiPartUpload(object):
    """Drives one S3 multipart upload: initiate, upload parts, complete/abort.

    All network work is delegated to the S3 connection object given as
    ``s3``; this class only orchestrates chunking and part bookkeeping.
    """

    MIN_CHUNK_SIZE_MB = 5    # 5MB
    MAX_CHUNK_SIZE_MB = 5120 # 5GB
    # NOTE(review): 42949672960 bytes is 40 GiB, although the original
    # comment claimed "5TB" -- confirm the intended limit.
    MAX_FILE_SIZE = 42949672960

    def __init__(self, s3, file_stream, uri, headers_baseline=None):
        """Start (or resume) a multipart upload of file_stream to uri."""
        self.s3 = s3
        self.file_stream = file_stream
        self.uri = uri
        self.parts = {}  # seq -> ETag of every part uploaded so far
        self.headers_baseline = headers_baseline or {}
        self.upload_id = self.initiate_multipart_upload()

    def get_parts_information(self, uri, upload_id):
        """Return {part_number: {'checksum':, 'size':}} for parts already on the server."""
        multipart_response = self.s3.list_multipart(uri, upload_id)
        tree = getTreeFromXml(multipart_response['data'])

        parts = dict()
        for elem in parseNodes(tree):
            try:
                parts[int(elem['PartNumber'])] = {'checksum': elem['ETag'], 'size': elem['Size']}
            except KeyError:
                pass

        return parts

    def get_unique_upload_id(self, uri):
        """Return the single in-progress upload id for uri, or None.

        Raises ValueError when more than one upload is pending for the
        same key, since resuming would be ambiguous.
        """
        upload_id = None
        multipart_response = self.s3.get_multipart(uri)
        tree = getTreeFromXml(multipart_response['data'])
        for mpupload in parseNodes(tree):
            try:
                mp_upload_id = mpupload['UploadId']
                mp_path = mpupload['Key']
                info("mp_path: %s, object: %s" % (mp_path, uri.object()))
                if mp_path == uri.object():
                    if upload_id is not None:
                        raise ValueError("More than one UploadId for URI %s. Disable multipart upload, or use\n %s multipart %s\nto list the Ids, then pass a unique --upload-id into the put command." % (uri, sys.argv[0], uri))
                    upload_id = mp_upload_id
            except KeyError:
                pass

        return upload_id

    def initiate_multipart_upload(self):
        """
        Begin a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadInitiate.html
        """
        if self.s3.config.upload_id is not None:
            # Caller pinned an explicit id (--upload-id).
            self.upload_id = self.s3.config.upload_id
        elif self.s3.config.put_continue:
            self.upload_id = self.get_unique_upload_id(self.uri)
        else:
            self.upload_id = None

        if self.upload_id is None:
            request = self.s3.create_request("OBJECT_POST", uri = self.uri,
                                             headers = self.headers_baseline,
                                             uri_params = {'uploads': None})
            response = self.s3.send_request(request)
            data = response["data"]
            self.upload_id = getTextFromXml(data, "UploadId")

        return self.upload_id

    def upload_all_parts(self, extra_label=''):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")

        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
        filename = self.file_stream.stream_name

        if filename != u"":
            # Real file: we know its size and hence the number of parts.
            size_left = file_size = os.stat(deunicodise(filename))[ST_SIZE]
            nr_parts = file_size // self.chunk_size + (file_size % self.chunk_size and 1)
            debug("MultiPart: Uploading %s in %d parts" % (filename, nr_parts))
        else:
            # Pipe/stdin: read chunk by chunk until EOF.
            debug("MultiPart: Uploading from %s" % filename)

        remote_statuses = dict()
        if self.s3.config.put_continue:
            # Resuming: learn which parts the server already has.
            remote_statuses = self.get_parts_information(self.uri, self.upload_id)

        if extra_label:
            extra_label = u' ' + extra_label
        seq = 1
        if filename != u"":
            while size_left > 0:
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = min(file_size - offset, self.chunk_size)
                size_left -= current_chunk_size
                labels = {
                    'source' : filename,
                    'destination' : self.uri.uri(),
                    'extra' : "[part %d of %d, %s]%s" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True), extra_label)
                }
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses.get(seq))
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort the upload, or\n  %s --upload-id %s put ...\nto continue the upload."
                          % (filename, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1
        else:
            while True:
                buffer = self.file_stream.read(self.chunk_size)
                offset = 0 # send from start of the buffer
                current_chunk_size = len(buffer)
                labels = {
                    'source' : filename,
                    'destination' : self.uri.uri(),
                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                if len(buffer) == 0: # EOF
                    break
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, buffer, remote_status = remote_statuses.get(seq))
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n  %s abortmp %s %s\nto abort, or\n  %s --upload-id %s put ...\nto continue the upload."
                          % (filename, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1

        debug("MultiPart: Upload finished: %d parts", seq - 1)

    def upload_part(self, seq, offset, chunk_size, labels, buffer = '', remote_status = None):
        """
        Upload a file chunk
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadUploadPart.html
        """
        # TODO implement Content-MD5
        debug("Uploading part %i of %r (%s bytes)" % (seq, self.upload_id, chunk_size))

        if remote_status is not None:
            # Resume support: skip the part when both size and md5 match
            # what the server already holds.
            if int(remote_status['size']) == chunk_size:
                checksum = calculateChecksum(buffer, self.file_stream, offset, chunk_size, self.s3.config.send_chunk)
                remote_checksum = remote_status['checksum'].strip('"\'')
                if remote_checksum == checksum:
                    warning("MultiPart: size and md5sum match for %s part %d, skipping." % (self.uri, seq))
                    self.parts[seq] = remote_status['checksum']
                    return
                else:
                    warning("MultiPart: checksum (%s vs %s) does not match for %s part %d, reuploading."
                            % (remote_checksum, checksum, self.uri, seq))
            else:
                warning("MultiPart: size (%d vs %d) does not match for %s part %d, reuploading."
                        % (int(remote_status['size']), chunk_size, self.uri, seq))

        headers = { "content-length": str(chunk_size) }
        query_string_params = {'partNumber':'%s' % seq,
                               'uploadId': self.upload_id}
        request = self.s3.create_request("OBJECT_PUT", uri = self.uri,
                                         headers = headers,
                                         uri_params = query_string_params)
        response = self.s3.send_file(request, self.file_stream, labels, buffer, offset = offset, chunk_size = chunk_size)
        self.parts[seq] = response["headers"].get('etag', '').strip('"\'')
        return response

    def complete_multipart_upload(self):
        """
        Finish a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadComplete.html
        """
        debug("MultiPart: Completing upload: %s" % self.upload_id)

        parts_xml = []
        # Bugfix: the XML tags had been stripped from these format strings,
        # producing a body S3 would reject. Restored per the AWS
        # CompleteMultipartUpload request schema.
        part_xml = "<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>"
        for seq, etag in self.parts.items():
            parts_xml.append(part_xml % (seq, etag))
        body = "<CompleteMultipartUpload>%s</CompleteMultipartUpload>" % ("".join(parts_xml))

        headers = { "content-length": str(len(body)) }
        request = self.s3.create_request("OBJECT_POST", uri = self.uri,
                                         headers = headers, body = body,
                                         uri_params = {'uploadId': self.upload_id})
        response = self.s3.send_request(request)

        return response

    def abort_upload(self):
        """
        Abort multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadAbort.html
        """
        debug("MultiPart: Aborting upload: %s" % self.upload_id)
        #request = self.s3.create_request("OBJECT_DELETE", uri = self.uri,
        #                                 uri_params = {'uploadId': self.upload_id})
        #response = self.s3.send_request(request)
        response = None
        return response
212 |
213 | # vim:et:ts=4:sts=4:ai
214 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/PkgInfo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
# Package metadata; consumed by setup.py and reported by the CLI.
package = "s3cmd"
version = "2.0.1"
url = "http://s3tools.org"
license = "GNU GPL v2+"
short_description = "Command line tool for managing Amazon S3 and CloudFront services"
long_description = """
S3cmd lets you copy files from/to Amazon S3
(Simple Storage Service) using a simple to use
command line client. Supports rsync-like backup,
GPG encryption, and more. Also supports management
of Amazon's CloudFront content delivery network.
"""
21 |
22 | # vim:et:ts=4:sts=4:ai
23 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/Progress.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, division
10 |
11 | import sys
12 | import datetime
13 | import time
14 | import S3.Utils
15 |
class Progress(object):
    """Plain progress reporter printing 5%-milestone marks to stdout.

    Subclasses (ANSI / CR variants) override display() for nicer output.
    ``labels`` is a dict with 'action', 'source', 'destination' and
    'extra' keys used for the header line.
    """

    _stdout = sys.stdout
    _last_display = 0  # timestamp of the last redraw (throttling)

    def __init__(self, labels, total_size):
        self._stdout = sys.stdout
        self.new_file(labels, total_size)

    def new_file(self, labels, total_size):
        """Reset counters and timers for a new transfer and print its labels."""
        self.labels = labels
        self.total_size = total_size
        # Set initial_position to something in the
        # case we're not counting from 0. For instance
        # when appending to a partially downloaded file.
        # Setting initial_position will let the speed
        # be computed right.
        self.initial_position = 0
        self.current_position = self.initial_position
        self.time_start = datetime.datetime.now()
        self.time_last = self.time_start
        self.time_current = self.time_start

        self.display(new_file = True)

    def update(self, current_position = -1, delta_position = -1):
        """Advance the position (absolute or relative) and redraw."""
        self.time_last = self.time_current
        self.time_current = datetime.datetime.now()
        if current_position > -1:
            self.current_position = current_position
        elif delta_position > -1:
            self.current_position += delta_position
        #else:
        #   no update, just call display()
        self.display()

    def done(self, message):
        """Emit the final line (e.g. "done") for this transfer."""
        self.display(done_message = message)

    def output_labels(self):
        self._stdout.write(u"%(action)s: '%(source)s' -> '%(destination)s'  %(extra)s\n" % self.labels)
        self._stdout.flush()

    def _display_needed(self):
        # We only need to update the display every so often.
        if time.time() - self._last_display > 1:
            self._last_display = time.time()
            return True
        return False

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done = False[/True])

        Override this method to provide a nicer output.
        """
        if new_file:
            self.output_labels()
            self.last_milestone = 0
            return

        if self.current_position == self.total_size:
            # Transfer finished: print the summary with size and speed.
            print_size = S3.Utils.formatSize(self.current_position, True)
            if print_size[1] != "": print_size[1] += "B"
            timedelta = self.time_current - self.time_start
            sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds) / 1000000.0
            print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
            self._stdout.write("100%%  %s%s  in  %.2fs  (%.2f %sB/s)\n" %
                               (print_size[0], print_size[1], sec_elapsed, print_speed[0], print_speed[1]))
            self._stdout.flush()
            return

        rel_position = (self.current_position * 100) // self.total_size
        if rel_position >= self.last_milestone:
            # Move by increments of 5.
            self.last_milestone = (rel_position // 5) * 5
            # Bugfix: file.write() takes a single string -- the original
            # passed the value as a second argument, raising TypeError.
            self._stdout.write("%d%% " % self.last_milestone)
            self._stdout.flush()
            return
95 |
class ProgressANSI(Progress):
    """Progress bar redrawn in place via ANSI cursor save/restore codes."""
    ## http://en.wikipedia.org/wiki/ANSI_escape_code
    SCI = '\x1b['
    ANSI_hide_cursor = SCI + "?25l"
    ANSI_show_cursor = SCI + "?25h"
    ANSI_save_cursor_pos = SCI + "s"
    ANSI_restore_cursor_pos = SCI + "u"
    ANSI_move_cursor_to_column = SCI + "%uG"
    ANSI_erase_to_eol = SCI + "0K"
    ANSI_erase_current_line = SCI + "2K"

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)
        """
        if new_file:
            # Print the header and remember where the progress line starts.
            self.output_labels()
            self._stdout.write(self.ANSI_save_cursor_pos)
            self._stdout.flush()
            return

        # Only display progress every so often
        if not (new_file or done_message) and not self._display_needed():
            return

        delta = self.time_current - self.time_start
        sec_elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds)/1000000.0
        if (sec_elapsed > 0):
            print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
        else:
            print_speed = (0, "")
        fields = {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and ((self.current_position * 100) // self.total_size) or 0,
            "elapsed" : sec_elapsed,
            "speed" : print_speed[0],
            "speed_coeff" : print_speed[1],
        }
        self._stdout.write(self.ANSI_restore_cursor_pos)
        self._stdout.write(self.ANSI_erase_to_eol)
        self._stdout.write("%(current)s of %(total)s   %(percent)3d%% in %(elapsed)ds  %(speed).2f %(speed_coeff)sB/s" % fields)

        if done_message:
            self._stdout.write("  %s\n" % done_message)

        self._stdout.flush()
142 |
class ProgressCR(Progress):
    ## Uses CR char (Carriage Return) just like other progress bars do.
    CR_char = chr(13)

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)
        """
        if new_file:
            self.output_labels()
            return

        # Only display progress every so often
        if not (new_file or done_message) and not self._display_needed():
            return

        delta = self.time_current - self.time_start
        sec_elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds)/1000000.0
        if (sec_elapsed > 0):
            print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
        else:
            print_speed = (0, "")
        fields = {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and ((self.current_position * 100) // self.total_size) or 0,
            "elapsed" : sec_elapsed,
            "speed" : print_speed[0],
            "speed_coeff" : print_speed[1],
        }
        # Rewind to column 0 and overwrite the previous progress line.
        self._stdout.write(self.CR_char)
        self._stdout.write(" %(current)s of %(total)s   %(percent)3d%% in %(elapsed)4ds  %(speed)7.2f %(speed_coeff)sB/s" % fields)
        if done_message:
            self._stdout.write("  %s\n" % done_message)

        self._stdout.flush()
179 |
class StatsInfo(object):
    """Holding info for stats totals"""

    def __init__(self):
        # All totals start unknown (None) until the caller fills them in.
        self.files = None
        self.size = None
        self.files_transferred = None
        self.size_transferred = None
        self.files_copied = None
        self.size_copied = None
        self.files_deleted = None
        self.size_deleted = None

    def format_output(self):
        """Render the collected totals as "\\nStats: ..." lines."""
        chunks = []

        def emit(label, count, size):
            # One stats line; the byte total is appended only when known.
            entry = u"%s: %d" % (label, count)
            if size is not None:
                entry += " (%d bytes) " % size
            chunks.append(u"\nStats: " + entry)

        if self.files is not None:
            emit(u"Number of files", self.files, self.size)
        if self.files_transferred:
            emit(u"Number of files transferred", self.files_transferred, self.size_transferred)
        if self.files_copied:
            emit(u"Number of files copied", self.files_copied, self.size_copied)
        if self.files_deleted:
            emit(u"Number of files deleted", self.files_deleted, self.size_deleted)

        return u"".join(chunks)
220 | # vim:et:ts=4:sts=4:ai
221 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/S3Uri.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | import os
12 | import re
13 | import sys
14 | from .Utils import unicodise, deunicodise, check_bucket_name_dns_support
15 | from . import Config
16 |
# Python 2/3 marker; used by S3Uri.__str__ to decide whether to deunicodise.
if sys.version_info >= (3,0):
    PY3 = True
else:
    PY3 = False
21 |
class S3Uri(object):
    """Polymorphic URI factory: S3Uri(string) returns an instance of the
    first subclass whose __init__ accepts the string.

    Subclasses signal "not my scheme" by raising ValueError from __init__.
    """
    type = None
    _subclasses = None  # lazily-built list of all S3Uri subclasses

    def __new__(self, string):
        # NOTE: `self` here is really the class (conventionally `cls`);
        # kept as-is to avoid touching this dispatch logic.
        if not self._subclasses:
            ## Generate a list of all subclasses of S3Uri
            self._subclasses = []
            # Scan this module's namespace for classes derived from S3Uri.
            dict = sys.modules[__name__].__dict__
            for something in dict:
                if type(dict[something]) is not type(self):
                    continue
                if issubclass(dict[something], self) and dict[something] != self:
                    self._subclasses.append(dict[something])
        # Try each subclass in turn; the first one that parses wins.
        for subclass in self._subclasses:
            try:
                instance = object.__new__(subclass)
                instance.__init__(string)
                return instance
            except ValueError:
                continue
        raise ValueError("%s: not a recognized URI" % string)

    def __str__(self):
        # On Python 2 the byte-string form is expected by callers.
        if PY3:
            return self.uri()
        else:
            return deunicodise(self.uri())

    def __unicode__(self):
        return self.uri()

    def __repr__(self):
        return repr("<%s: %s>" % (self.__class__.__name__, self.__unicode__()))

    def public_url(self):
        # Overridden by subclasses that have an anonymous-HTTP form.
        raise ValueError("This S3 URI does not have Anonymous URL representation")

    def basename(self):
        """Return the last path component of the URI."""
        return self.__unicode__().split("/")[-1]
62 |
class S3UriS3(S3Uri):
    """URIs of the form s3://bucket/object."""
    type = "s3"
    _re = re.compile("^s3:///*([^/]*)/?(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        match = self._re.match(string)
        if match is None:
            raise ValueError("%s: not a S3 URI" % string)
        # Exactly two capture groups: bucket, object key.
        self._bucket, self._object = match.groups()

    def bucket(self):
        return self._bucket

    def object(self):
        return self._object

    def has_bucket(self):
        return bool(self._bucket)

    def has_object(self):
        return bool(self._object)

    def uri(self):
        return u"/".join([u"s3:/", self._bucket, self._object])

    def is_dns_compatible(self):
        return check_bucket_name_dns_support(Config.Config().host_bucket, self._bucket)

    def public_url(self):
        cfg = Config.Config()
        # Virtual-hosted style when the bucket name is DNS-safe,
        # path style otherwise.
        if self.is_dns_compatible():
            return "http://%s.%s/%s" % (self._bucket, cfg.host_base, self._object)
        return "http://%s/%s/%s" % (cfg.host_base, self._bucket, self._object)

    def host_name(self):
        if self.is_dns_compatible():
            return "%s.s3.amazonaws.com" % (self._bucket)
        return "s3.amazonaws.com"

    @staticmethod
    def compose_uri(bucket, object = ""):
        return u"s3://%s/%s" % (bucket, object)

    @staticmethod
    def httpurl_to_s3uri(http_url):
        """Convert an http(s) S3 URL (path or virtual-hosted style) to an S3Uri."""
        match = re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE | re.UNICODE)
        hostname, object = match.groups()[1:]
        hostname = hostname.lower()
        suffix = ".s3.amazonaws.com"
        if hostname == "s3.amazonaws.com":
            ## old-style url: http://s3.amazonaws.com/bucket/object
            if "/" in object:
                ## bucket/object
                bucket, object = object.split("/", 1)
            else:
                ## no object given
                bucket, object = object, ""
        elif hostname.endswith(suffix):
            ## new-style url: http://bucket.s3.amazonaws.com/object
            bucket = hostname[:-len(suffix)]
        else:
            raise ValueError("Unable to parse URL: %s" % http_url)
        return S3Uri(u"s3://%(bucket)s/%(object)s" % {
            'bucket' : bucket,
            'object' : object })
130 |
class S3UriS3FS(S3Uri):
    """URI of the form s3fs://filesystem/path (s3fs-backed filesystem)."""
    type = "s3fs"
    _re = re.compile("^s3fs:///*([^/]*)/?(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        parsed = self._re.match(string)
        if not parsed:
            raise ValueError("%s: not a S3fs URI" % string)
        fsname, remote_path = parsed.groups()
        self._fsname = fsname
        # Keep the path as a list of components; path() re-joins them.
        self._path = remote_path.split("/")

    def fsname(self):
        return self._fsname

    def path(self):
        return "/".join(self._path)

    def uri(self):
        return u"/".join([u"s3fs:/", self._fsname, self.path()])
150 |
class S3UriFile(S3Uri):
    """Local file path, optionally given as a file:// URI."""
    type = "file"
    # Raw string: "\w" in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, an error in future versions).
    # The compiled pattern value is unchanged.
    _re = re.compile(r"^(\w+://)?(.*)", re.UNICODE)

    def __init__(self, string):
        """Accept a plain path or a file:// URI; reject other schemes."""
        match = self._re.match(string)
        groups = match.groups()
        if groups[0] not in (None, "file://"):
            raise ValueError("%s: not a file:// URI" % string)
        if groups[0] is None:
            # Plain path: split on the platform separator.
            self._path = groups[1].split(os.sep)
        else:
            # file:// URIs always use forward slashes.
            self._path = groups[1].split("/")

    def path(self):
        return os.sep.join(self._path)

    def uri(self):
        return u"/".join([u"file:/"]+self._path)

    def isdir(self):
        return os.path.isdir(deunicodise(self.path()))

    def dirname(self):
        return unicodise(os.path.dirname(deunicodise(self.path())))

    def basename(self):
        return unicodise(os.path.basename(deunicodise(self.path())))
178 |
class S3UriCloudFront(S3Uri):
    """URI of the form cf://distribution-id[/request-id]."""
    type = "cf"
    _re = re.compile("^cf://([^/]*)/*(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        parsed = self._re.match(string)
        if not parsed:
            raise ValueError("%s: not a CloudFront URI" % string)
        dist_id, request_id = parsed.groups()
        self._dist_id = dist_id
        # An empty trailer or a lone "/" means no request id.
        self._request_id = request_id != "/" and request_id or None

    def dist_id(self):
        return self._dist_id

    def request_id(self):
        return self._request_id

    def uri(self):
        result = u"cf://" + self.dist_id()
        if self.request_id():
            result += u"/" + self.request_id()
        return result
201 |
if __name__ == "__main__":
    ## Self-test: parse one URI of every supported flavour and print
    ## the pieces the matching subclass extracted.
    uri = S3Uri("s3://bucket/object")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("bucket =", uri.bucket())
    print("object =", uri.object())
    print()

    ## Bucket-only s3:// URI (no object part).
    uri = S3Uri("s3://bucket")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("bucket =", uri.bucket())
    print()

    ## s3fs:// filesystem URI.
    uri = S3Uri("s3fs://filesystem1/path/to/remote/file.txt")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("path =", uri.path())
    print()

    ## Plain local path falls through to S3UriFile.
    uri = S3Uri("/path/to/local/file.txt")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("path =", uri.path())
    print()

    ## CloudFront distribution URI.
    uri = S3Uri("cf://1234567890ABCD/")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("dist_id =", uri.dist_id())
    print()

# vim:et:ts=4:sts=4:ai
240 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/SortedDict.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | from .BidirMap import BidirMap
12 |
class SortedDictIterator(object):
    """Iterates a SortedDict's keys by consuming a pre-sorted key list."""

    def __init__(self, sorted_dict, keys):
        self.sorted_dict = sorted_dict
        self.keys = keys

    def __next__(self):
        # Serve keys from the front; an exhausted list ends iteration.
        if not self.keys:
            raise StopIteration
        return self.keys.pop(0)

    # Python 2 spelling of the iterator protocol.
    next = __next__
25 |
class SortedDict(dict):
    def __init__(self, mapping = None, ignore_case = True, **kwargs):
        """
        WARNING: SortedDict() with ignore_case==True will
        drop entries differing only in capitalisation!
        Eg: SortedDict({'auckland':1, 'Auckland':2}).keys() => ['Auckland']
        With ignore_case==False it's all right

        `mapping` defaults to None (treated as empty) rather than a
        shared mutable default dict.
        """
        dict.__init__(self, mapping if mapping is not None else {}, **kwargs)
        self.ignore_case = ignore_case

    def keys(self):
        """Return the keys as a sorted list (case-insensitively when
        ignore_case is set, preserving the original capitalisation)."""
        # TODO fix
        # Probably not anymore memory efficient on python2
        # as now 2 copies of keys to sort them.
        keys = dict.keys(self)
        if self.ignore_case:
            # Translation map from lowercased key back to original key
            xlat_map = BidirMap()
            for key in keys:
                xlat_map[key.lower()] = key
            # Sort on the lowercased keys, then map back
            lc_keys = sorted(xlat_map.keys())
            return [xlat_map[k] for k in lc_keys]
        else:
            keys = sorted(keys)
            return keys

    def __iter__(self):
        return SortedDictIterator(self, self.keys())

    def __getitem__(self, index):
        """Override to support the "get_slice" for python3 """
        if isinstance(index, slice):
            r = SortedDict(ignore_case = self.ignore_case)
            for k in self.keys()[index]:
                r[k] = self[k]
        else:
            r = super(SortedDict, self).__getitem__(index)
        return r
66 |
67 |
if __name__ == "__main__":
    ## Self-test: compare case-insensitive vs case-sensitive key order.
    d = { 'AWS' : 1, 'Action' : 2, 'america' : 3, 'Auckland' : 4, 'America' : 5 }
    sd = SortedDict(d)
    print("Wanted: Action, america, Auckland, AWS, [ignore case]")
    print("Got: ", end=' ')
    for key in sd:
        print("%s," % key, end=' ')
    print(" [used: __iter__()]")
    ## Same data; keys differing only in case are now kept distinct.
    d = SortedDict(d, ignore_case = False)
    print("Wanted: AWS, Action, America, Auckland, america, [case sensitive]")
    print("Got: ", end=' ')
    for key in d.keys():
        print("%s," % key, end=' ')
    print(" [used: keys()]")

# vim:et:ts=4:sts=4:ai
84 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/__pycache__/PkgInfo.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/s3cmd-2.0.1/S3/__pycache__/PkgInfo.cpython-36.pyc
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/S3/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/s3cmd-2.0.1/S3/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/ACL.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 - Access Control List representation
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | import sys
12 | from .Utils import getTreeFromXml, deunicodise, encode_to_s3, decode_from_s3
13 |
14 | try:
15 | import xml.etree.ElementTree as ET
16 | except ImportError:
17 | import elementtree.ElementTree as ET
18 |
19 | PY3 = (sys.version_info >= (3,0))
20 |
class Grantee(object):
    """One entry of an S3 ACL: who is granted which permission."""
    ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
    LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"

    def __init__(self):
        self.xsi_type = None
        self.tag = None
        self.name = None
        self.display_name = None
        self.permission = None

    def __repr__(self):
        text = 'Grantee("%s", "%s", "%s")' % (self.tag, self.name, self.permission)
        return repr(text)

    def isAllUsers(self):
        return self.tag == "URI" and self.name == Grantee.ALL_USERS_URI

    def isAnonRead(self):
        # Anonymous read means the AllUsers group holds READ (or the
        # stronger FULL_CONTROL).
        if not self.isAllUsers():
            return False
        return self.permission == "READ" or self.permission == "FULL_CONTROL"

    def getElement(self):
        """Build the <Grant> XML element for this grantee."""
        grant = ET.Element("Grant")
        grantee = ET.SubElement(grant, "Grantee", {
            'xmlns:xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:type' : self.xsi_type
        })
        name_el = ET.SubElement(grantee, self.tag)
        name_el.text = self.name
        permission_el = ET.SubElement(grant, "Permission")
        permission_el.text = self.permission
        return grant
56 |
class GranteeAnonRead(Grantee):
    """Grantee giving READ permission to the AllUsers (anonymous) group."""
    def __init__(self):
        Grantee.__init__(self)
        self.xsi_type = "Group"
        self.tag = "URI"
        self.permission = "READ"
        self.name = Grantee.ALL_USERS_URI
64 |
class GranteeLogDelivery(Grantee):
    """Grantee giving the S3 LogDelivery group access to a bucket."""
    def __init__(self, permission):
        """
        permission must be either READ_ACP or WRITE
        """
        Grantee.__init__(self)
        self.xsi_type = "Group"
        self.tag = "URI"
        self.permission = permission
        self.name = Grantee.LOG_DELIVERY_URI
75 |
class ACL(object):
    """Parsed representation of an S3 AccessControlPolicy document.

    Holds the owner and the list of Grantee entries and can serialize
    itself back to XML.
    """
    ## Skeleton policy used when no XML is supplied.
    ## NOTE(review): the original literal was corrupted in this copy of
    ## the file (tags stripped, leaving b""); reconstructed from upstream
    ## s3cmd -- confirm against upstream.  get_printable_tree() requires
    ## the Owner/ID and AccessControlList elements to exist.
    EMPTY_ACL = (b"<AccessControlPolicy><Owner><ID></ID></Owner>"
                 b"<AccessControlList></AccessControlList></AccessControlPolicy>")

    def __init__(self, xml = None):
        """Parse `xml` (bytes or unicode); defaults to an empty policy."""
        if not xml:
            xml = ACL.EMPTY_ACL

        self.grantees = []
        self.owner_id = ""
        self.owner_nick = ""

        tree = getTreeFromXml(encode_to_s3(xml))
        self.parseOwner(tree)
        self.parseGrants(tree)

    def parseOwner(self, tree):
        """Extract the owner id and display name from the policy tree."""
        self.owner_id = tree.findtext(".//Owner//ID")
        self.owner_nick = tree.findtext(".//Owner//DisplayName")

    def parseGrants(self, tree):
        """Populate self.grantees from every <Grant> element."""
        for grant in tree.findall(".//Grant"):
            grantee = Grantee()
            g = grant.find(".//Grantee")
            grantee.xsi_type = g.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
            grantee.permission = grant.find('Permission').text
            for el in g:
                if el.tag == "DisplayName":
                    grantee.display_name = el.text
                else:
                    grantee.tag = el.tag
                    grantee.name = el.text
            self.grantees.append(grantee)

    def getGrantList(self):
        """Return [{'grantee': ..., 'permission': ...}, ...] for display."""
        acl = []
        for grantee in self.grantees:
            if grantee.display_name:
                user = grantee.display_name
            elif grantee.isAllUsers():
                user = "*anon*"
            else:
                user = grantee.name
            acl.append({'grantee': user, 'permission': grantee.permission})
        return acl

    def getOwner(self):
        return { 'id' : self.owner_id, 'nick' : self.owner_nick }

    def isAnonRead(self):
        """True when any grantee gives anonymous users read access."""
        for grantee in self.grantees:
            if grantee.isAnonRead():
                return True
        return False

    def grantAnonRead(self):
        if not self.isAnonRead():
            self.appendGrantee(GranteeAnonRead())

    def revokeAnonRead(self):
        self.grantees = [g for g in self.grantees if not g.isAnonRead()]

    def appendGrantee(self, grantee):
        self.grantees.append(grantee)

    def hasGrant(self, name, permission):
        """True when `name` already holds `permission` or FULL_CONTROL."""
        name = name.lower()
        permission = permission.upper()

        for grantee in self.grantees:
            # grantee.name may be None (e.g. a grant parsed with only a
            # DisplayName); previously this raised AttributeError.
            if (grantee.name or '').lower() == name:
                if grantee.permission == "FULL_CONTROL":
                    return True
                elif grantee.permission.upper() == permission:
                    return True

        return False

    def grant(self, name, permission):
        """Add a grant for `name`, deducing the grantee type from its
        shape: e-mail address, group URI, or canonical user id."""
        if self.hasGrant(name, permission):
            return

        permission = permission.upper()

        if "ALL" == permission:
            permission = "FULL_CONTROL"

        if "FULL_CONTROL" == permission:
            # FULL_CONTROL supersedes any narrower grants for this name.
            self.revoke(name, "ALL")

        grantee = Grantee()
        grantee.name = name
        grantee.permission = permission

        if '@' in name:
            grantee.name = grantee.name.lower()
            grantee.xsi_type = "AmazonCustomerByEmail"
            grantee.tag = "EmailAddress"
        elif 'http://acs.amazonaws.com/groups/' in name:
            grantee.xsi_type = "Group"
            grantee.tag = "URI"
        else:
            grantee.name = grantee.name.lower()
            grantee.xsi_type = "CanonicalUser"
            grantee.tag = "ID"

        self.appendGrantee(grantee)

    def revoke(self, name, permission):
        """Remove matching grants; permission "ALL" drops every grant
        held by `name`."""
        name = name.lower()
        permission = permission.upper()

        # name/display_name may be None; treat them as empty strings
        # instead of crashing with AttributeError.
        if "ALL" == permission:
            self.grantees = [g for g in self.grantees
                             if not ((g.name or '').lower() == name
                                     or (g.display_name or '').lower() == name)]
        else:
            self.grantees = [g for g in self.grantees
                             if not (((g.display_name or '').lower() == name and g.permission.upper() == permission)
                                     or ((g.name or '').lower() == name and g.permission.upper() == permission))]

    def get_printable_tree(self):
        """Build an ElementTree of the policy ready for serialisation."""
        tree = getTreeFromXml(ACL.EMPTY_ACL)
        tree.attrib['xmlns'] = "http://s3.amazonaws.com/doc/2006-03-01/"
        owner = tree.find(".//Owner//ID")
        owner.text = self.owner_id
        acl = tree.find(".//AccessControlList")
        for grantee in self.grantees:
            acl.append(grantee.getElement())
        return tree

    def __unicode__(self):
        return decode_from_s3(ET.tostring(self.get_printable_tree()))

    def __str__(self):
        if PY3:
            # Return unicode
            return ET.tostring(self.get_printable_tree(), encoding="unicode")
        else:
            # Return bytes
            return ET.tostring(self.get_printable_tree())
214 |
if __name__ == "__main__":
    ## Self-test: parse a sample policy, toggle anonymous read, dump XML.
    ## NOTE(review): the inline XML literal lost its tags in this copy of
    ## the file; reconstructed from upstream s3cmd -- confirm upstream.
    xml = b"""<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
    <ID>12345678901234567890</ID>
    <DisplayName>owner-nickname</DisplayName>
</Owner>
<AccessControlList>
    <Grant>
        <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
            <ID>12345678901234567890</ID>
            <DisplayName>owner-nickname</DisplayName>
        </Grantee>
        <Permission>FULL_CONTROL</Permission>
    </Grant>
    <Grant>
        <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group">
            <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>
        </Grantee>
        <Permission>READ</Permission>
    </Grant>
</AccessControlList>
</AccessControlPolicy>
"""
    acl = ACL(xml)
    print("Grants:", acl.getGrantList())
    acl.revokeAnonRead()
    print("Grants:", acl.getGrantList())
    acl.grantAnonRead()
    print("Grants:", acl.getGrantList())
    print(acl)
246 |
247 | # vim:et:ts=4:sts=4:ai
248 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/AccessLog.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 - Access Control List representation
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | import sys
12 |
13 | from . import S3Uri
14 | from .Exceptions import ParameterError
15 | from .Utils import getTreeFromXml, decode_from_s3
16 | from .ACL import GranteeAnonRead
17 |
18 | try:
19 | import xml.etree.ElementTree as ET
20 | except ImportError:
21 | import elementtree.ElementTree as ET
22 |
23 | PY3 = (sys.version_info >= (3,0))
24 |
25 | __all__ = []
class AccessLog(object):
    """Represents and edits a bucket's <BucketLoggingStatus> document."""
    ## NOTE(review): both literals lost their XML tags in this copy of
    ## the file (leaving empty strings, which getTreeFromXml cannot
    ## parse); reconstructed from upstream s3cmd -- confirm upstream.
    LOG_DISABLED = "<BucketLoggingStatus></BucketLoggingStatus>"
    LOG_TEMPLATE = "<LoggingEnabled><TargetBucket></TargetBucket><TargetPrefix></TargetPrefix></LoggingEnabled>"

    def __init__(self, xml = None):
        """Parse `xml`; defaults to a logging-disabled skeleton."""
        if not xml:
            xml = self.LOG_DISABLED
        self.tree = getTreeFromXml(xml)
        self.tree.attrib['xmlns'] = "http://doc.s3.amazonaws.com/2006-03-01"

    def isLoggingEnabled(self):
        return (self.tree.find(".//LoggingEnabled") is not None)

    def disableLogging(self):
        el = self.tree.find(".//LoggingEnabled")
        # Compare against None: an Element with no children is falsy, so
        # a bare "if el:" would fail to remove an empty node.
        if el is not None:
            self.tree.remove(el)

    def enableLogging(self, target_prefix_uri):
        """Enable logging to the bucket/prefix of the given S3Uri."""
        el = self.tree.find(".//LoggingEnabled")
        # "is None" rather than truthiness: a child-less Element is
        # falsy and would be duplicated by "if not el:".
        if el is None:
            el = getTreeFromXml(self.LOG_TEMPLATE)
            self.tree.append(el)
        el.find(".//TargetBucket").text = target_prefix_uri.bucket()
        el.find(".//TargetPrefix").text = target_prefix_uri.object()

    def targetPrefix(self):
        if self.isLoggingEnabled():
            target_prefix = u"s3://%s/%s" % (
                self.tree.find(".//LoggingEnabled//TargetBucket").text,
                self.tree.find(".//LoggingEnabled//TargetPrefix").text)
            return S3Uri.S3Uri(target_prefix)
        else:
            return ""

    def setAclPublic(self, acl_public):
        le = self.tree.find(".//LoggingEnabled")
        if le is None:
            raise ParameterError("Logging not enabled, can't set default ACL for logs")
        tg = le.find(".//TargetGrants")
        if not acl_public:
            if tg is None:
                ## All good, it's not been there
                return
            else:
                le.remove(tg)
        else: # acl_public == True
            anon_read = GranteeAnonRead().getElement()
            if tg is None:
                tg = ET.SubElement(le, "TargetGrants")
            ## What if TargetGrants already exists? We should check if
            ## AnonRead is there before appending a new one. Later...
            tg.append(anon_read)

    def isAclPublic(self):
        raise NotImplementedError()

    def __unicode__(self):
        return decode_from_s3(ET.tostring(self.tree))

    def __str__(self):
        if PY3:
            # Return unicode
            return ET.tostring(self.tree, encoding="unicode")
        else:
            # Return bytes
            return ET.tostring(self.tree)
93 |
## Export AccessLog via this module's __all__.
__all__.append("AccessLog")

if __name__ == "__main__":
    ## Self-test: enable/disable logging and toggle public-read target
    ## grants, printing the XML after each step.
    log = AccessLog()
    print(log)
    log.enableLogging(S3Uri.S3Uri(u"s3://targetbucket/prefix/log-"))
    print(log)
    log.setAclPublic(True)
    print(log)
    log.setAclPublic(False)
    print(log)
    log.disableLogging()
    print(log)

# vim:et:ts=4:sts=4:ai
109 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/BidirMap.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
class BidirMap(object):
    """A one-to-one mapping with lookups in both directions.

    Binding a value that already belongs to a different key raises
    KeyError; re-binding an existing key drops the reverse entry of its
    previous value.
    """

    def __init__(self, **map):
        self.k2v = {}
        self.v2k = {}
        for key, value in map.items():
            self[key] = value

    def __setitem__(self, key, value):
        # Refuse a value that is already bound to a different key.
        if value in self.v2k and self.v2k[value] != key:
            raise KeyError("Value '%s' already in use with key '%s'" % (value, self.v2k[value]))
        # Drop the reverse entry of whatever this key mapped to before.
        try:
            del self.v2k[self.k2v[key]]
        except KeyError:
            pass
        self.k2v[key] = value
        self.v2k[value] = key

    def __getitem__(self, key):
        return self.k2v[key]

    def __str__(self):
        # Historical quirk: renders the value->key direction.
        return self.v2k.__str__()

    def getkey(self, value):
        return self.v2k[value]

    def getvalue(self, key):
        return self.k2v[key]

    def keys(self):
        return list(self.k2v)

    def values(self):
        return list(self.v2k)
45 | # vim:et:ts=4:sts=4:ai
46 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/Custom_httplib27.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import httplib
5 |
6 | from httplib import (_CS_REQ_SENT, _CS_REQ_STARTED, CONTINUE, UnknownProtocol,
7 | CannotSendHeader, NO_CONTENT, NOT_MODIFIED, EXPECTATION_FAILED,
8 | HTTPMessage, HTTPException)
9 |
10 | try:
11 | from cStringIO import StringIO
12 | except ImportError:
13 | from StringIO import StringIO
14 |
15 | from .Utils import encode_to_s3
16 |
17 |
18 | _METHODS_EXPECTING_BODY = ['PATCH', 'POST', 'PUT']
19 |
20 | # Fixed python 2.X httplib to be able to support
21 | # Expect: 100-Continue http feature
22 | # Inspired by:
23 | # http://bugs.python.org/file26357/issue1346874-273.patch
24 |
def httpresponse_patched_begin(self):
    """ Re-implemented httplib begin function
    to not loop over "100 CONTINUE" status replies
    but to report it to higher level so it can be processed.

    NOTE(review): Python 2 only -- relies on the old httplib
    HTTPMessage(fp, seekable) API and StringIO imported above.
    """
    if self.msg is not None:
        # we've already started reading the response
        return

    # read only one status even if we get a non-100 response
    version, status, reason = self._read_status()

    self.status = status
    self.reason = reason.strip()
    # Map the protocol string to httplib's numeric version codes.
    if version == 'HTTP/1.0':
        self.version = 10
    elif version.startswith('HTTP/1.'):
        self.version = 11  # use HTTP/1.1 code for HTTP/1.x where x>=1
    elif version == 'HTTP/0.9':
        self.version = 9
    else:
        raise UnknownProtocol(version)

    if self.version == 9:
        # HTTP/0.9 has no headers: the whole stream is the body.
        self.length = None
        self.chunked = 0
        self.will_close = 1
        self.msg = HTTPMessage(StringIO())
        return

    self.msg = HTTPMessage(self.fp, 0)
    if self.debuglevel > 0:
        for hdr in self.msg.headers:
            print("header:", hdr, end=" ")

    # don't let the msg keep an fp
    self.msg.fp = None

    # are we using the chunked-style of transfer encoding?
    tr_enc = self.msg.getheader('transfer-encoding')
    if tr_enc and tr_enc.lower() == "chunked":
        self.chunked = 1
        self.chunk_left = None
    else:
        self.chunked = 0

    # will the connection close at the end of the response?
    self.will_close = self._check_close()

    # do we have a Content-Length?
    # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
    length = self.msg.getheader('content-length')
    if length and not self.chunked:
        try:
            self.length = int(length)
        except ValueError:
            self.length = None
        else:
            if self.length < 0:  # ignore nonsensical negative lengths
                self.length = None
    else:
        self.length = None

    # does the body have a fixed length? (of zero)
    if (status == NO_CONTENT or status == NOT_MODIFIED or
        100 <= status < 200 or      # 1xx codes
        self._method == 'HEAD'):
        self.length = 0

    # if the connection remains open, and we aren't using chunked, and
    # a content-length was not provided, then assume that the connection
    # WILL close.
    if not self.will_close and \
       not self.chunked and \
       self.length is None:
        self.will_close = 1
101 |
102 |
def httpconnection_patched_set_content_length(self, body, method):
    """Emit a Content-Length header deduced from `body`.

    Re-implemented because it is new in the latest httplib but needed
    by send_request.  An absent body still gets "Content-Length: 0" for
    methods that expect one (RFC 7230, Section 3.3.2); otherwise the
    length comes from len(body) or, for file-like objects, from fstat
    of the underlying descriptor.  When no length can be determined the
    header is simply omitted.
    """
    if body is None:
        # Methods expecting a body advertise an explicit zero length.
        if method.upper() in _METHODS_EXPECTING_BODY:
            self.putheader('Content-Length', '0')
        return

    try:
        thelen = str(len(body))
    except (TypeError, AttributeError):
        # Probably a file-like object: try to fstat its descriptor.
        try:
            thelen = str(os.fstat(body.fileno()).st_size)
        except (AttributeError, OSError):
            # Don't send a length if this failed
            if self.debuglevel > 0: print("Cannot stat!!")
            return
    self.putheader('Content-Length', thelen)
126 |
def httpconnection_patched_send_request(self, method, url, body, headers):
    """Send a complete request, with support for "Expect: 100-continue".

    NOTE(review): Python 2 only -- uses dict.iteritems().
    """
    # Honor explicitly requested Host: and Accept-Encoding: headers.
    header_names = dict.fromkeys([k.lower() for k in headers])
    skips = {}
    if 'host' in header_names:
        skips['skip_host'] = 1
    if 'accept-encoding' in header_names:
        skips['skip_accept_encoding'] = 1

    # Detect whether the caller asked for the 100-continue handshake.
    expect_continue = False
    for hdr, value in headers.iteritems():
        if 'expect' == hdr.lower() and '100-continue' in value.lower():
            expect_continue = True

    url = encode_to_s3(url)
    self.putrequest(method, url, **skips)

    if 'content-length' not in header_names:
        self._set_content_length(body, method)
    for hdr, value in headers.iteritems():
        self.putheader(encode_to_s3(hdr), encode_to_s3(value))

    # If an Expect: 100-continue was sent, we need to check for a 417
    # Expectation Failed to avoid unecessarily sending the body
    # See RFC 2616 8.2.3
    if not expect_continue:
        self.endheaders(body)
    else:
        if not body:
            raise HTTPException("A body is required when expecting "
                                "100-continue")
        # Send headers only, then wait for the interim response before
        # transmitting the body.
        self.endheaders()
        resp = self.getresponse()
        resp.read()
        self._HTTPConnection__state = _CS_REQ_SENT
        if resp.status == EXPECTATION_FAILED:
            raise ExpectationFailed()
        elif resp.status == CONTINUE:
            self.send(body)
166 |
def httpconnection_patched_endheaders(self, message_body=None):
    """Indicate that the last header line has been sent to the server.

    Sends the buffered request to the server.  The optional
    message_body is transmitted along with the request: in the same
    packet as the headers when it is a string, otherwise as a separate
    packet.
    """
    if self._HTTPConnection__state != _CS_REQ_STARTED:
        raise CannotSendHeader()
    self._HTTPConnection__state = _CS_REQ_SENT
    self._send_output(message_body)
181 |
# TCP Maximum Segment Size (MSS) is determined by the TCP stack on
# a per-connection basis. There is no simple and efficient
# platform independent mechanism for determining the MSS, so
# instead a reasonable estimate is chosen. The getsockopt()
# interface using the TCP_MAXSEG parameter may be a suitable
# approach on some operating systems. A value of 16KiB is chosen
# as a reasonable estimate of the maximum MSS.
# Bodies shorter than this are coalesced with the headers into a
# single send() call in _send_output below.
mss = 16384
190 |
def httpconnection_patched_send_output(self, message_body=None):
    """Send the currently buffered request and clear the buffer.

    Appends the blank line that terminates the header section.  A short
    string message_body is coalesced into the same send() call as the
    headers to avoid the interaction between delayed ACK and the Nagle
    algorithm; any other body is sent separately.
    """
    self._buffer.extend((b"", b""))
    msg = encode_to_s3(b"\r\n".join(self._buffer))
    del self._buffer[:]

    if isinstance(message_body, str) and len(message_body) < mss:
        msg += message_body
        message_body = None
    self.send(msg)
    if message_body is not None:
        # message_body was not a string (i.e. it is a file) and we must
        # run the risk of Nagle
        self.send(message_body)
213 |
214 |
class ExpectationFailed(HTTPException):
    # Raised by _send_request when the server answers 417
    # (EXPECTATION_FAILED) to an "Expect: 100-continue" header.
    pass
217 |
218 | # Wrappers #
219 |
def httpconnection_patched_wrapper_send_body(self, message_body):
    """Send an already-prepared request body over the connection."""
    self.send(message_body)
222 |
223 |
## Install the patched implementations into httplib so every
## HTTPConnection/HTTPResponse created by this process gains
## "Expect: 100-continue" support.
httplib.HTTPResponse.begin = httpresponse_patched_begin
httplib.HTTPConnection.endheaders = httpconnection_patched_endheaders
httplib.HTTPConnection._send_output = httpconnection_patched_send_output
httplib.HTTPConnection._set_content_length = httpconnection_patched_set_content_length
httplib.HTTPConnection._send_request = httpconnection_patched_send_request

# Interfaces added to httplib.HTTPConnection:
httplib.HTTPConnection.wrapper_send_body = httpconnection_patched_wrapper_send_body
232 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/Exceptions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager - Exceptions library
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import
10 |
11 | from logging import debug, error
12 | import sys
13 | import S3.Utils
14 | from . import ExitCodes
15 |
16 | if sys.version_info >= (3,0):
17 | PY3 = True
18 | # In python 3, unicode -> str, and str -> bytes
19 | unicode = str
20 | else:
21 | PY3 = False
22 |
23 |
24 | try:
25 | from xml.etree.ElementTree import ParseError as XmlParseError
26 | except ImportError:
27 | # ParseError was only added in python2.7, before ET was raising ExpatError
28 | from xml.parsers.expat import ExpatError as XmlParseError
29 |
class S3Exception(Exception):
    """Base class for all s3cmd exceptions; stores a unicode message."""
    def __init__(self, message = ""):
        self.message = S3.Utils.unicodise(message)

    def __str__(self):
        ## Don't return self.message directly because
        ## __unicode__() method could be overridden in subclasses!
        if PY3:
            return self.__unicode__()
        else:
            return S3.Utils.deunicodise(self.__unicode__())

    def __unicode__(self):
        return self.message

    ## (Base)Exception.message has been deprecated in Python 2.6
    ## so store the text in _message and re-expose it as a property.
    def _get_message(self):
        return self._message
    def _set_message(self, message):
        self._message = message
    message = property(_get_message, _set_message)
51 |
52 |
class S3Error (S3Exception):
    """Error response received from S3; parses the returned error XML."""
    def __init__(self, response):
        """`response` is a dict with "status", "reason" and optionally
        "headers" and "data" (the raw error XML body)."""
        self.status = response["status"]
        self.reason = response["reason"]
        self.info = {
            "Code" : "",
            "Message" : "",
            "Resource" : ""
        }
        debug("S3Error: %s (%s)" % (self.status, self.reason))
        if "headers" in response:
            for header in response["headers"]:
                debug("HttpHeader: %s: %s" % (header, response["headers"][header]))
        if "data" in response and response["data"]:
            try:
                tree = S3.Utils.getTreeFromXml(response["data"])
            except XmlParseError:
                debug("Not an XML response")
            else:
                try:
                    self.info.update(self.parse_error_xml(tree))
                except Exception as e:
                    error("Error parsing xml: %s. ErrorXML: %s" % (e, response["data"]))

        self.code = self.info["Code"]
        self.message = self.info["Message"]
        self.resource = self.info["Resource"]

    def __unicode__(self):
        retval = u"%d " % (self.status)
        retval += (u"(%s)" % ("Code" in self.info and self.info["Code"] or self.reason))
        error_msg = self.info.get("Message")
        if error_msg:
            retval += (u": %s" % error_msg)
        return retval

    def get_error_code(self):
        """Map the HTTP status code to one of the process exit codes."""
        if self.status in [301, 307]:
            return ExitCodes.EX_SERVERMOVED
        elif self.status in [400, 405, 411, 416, 417, 501, 504]:
            return ExitCodes.EX_SERVERERROR
        elif self.status == 403:
            return ExitCodes.EX_ACCESSDENIED
        elif self.status == 404:
            return ExitCodes.EX_NOTFOUND
        elif self.status == 409:
            return ExitCodes.EX_CONFLICT
        elif self.status == 412:
            return ExitCodes.EX_PRECONDITION
        elif self.status == 500:
            return ExitCodes.EX_SOFTWARE
        elif self.status == 503:
            return ExitCodes.EX_SERVICE
        else:
            return ExitCodes.EX_SOFTWARE

    @staticmethod
    def parse_error_xml(tree):
        """Collect the child tags of the <Error> element into a dict.

        Raises S3ResponseError when no <Error> element is present.
        """
        info = {}
        error_node = tree
        if not error_node.tag == "Error":
            error_node = tree.find(".//Error")
        if error_node is not None:
            # list(element) replaces Element.getchildren(), which was
            # deprecated since Python 3.2 and removed in Python 3.9.
            for child in list(error_node):
                if child.text != "":
                    debug("ErrorXML: " + child.tag + ": " + repr(child.text))
                    info[child.tag] = child.text
        else:
            raise S3ResponseError("Malformed error XML returned from remote server.")
        return info
123 |
124 |
class CloudFrontError(S3Error):
    """S3Error subclass used for CloudFront service error responses."""
    pass

class S3UploadError(S3Exception):
    """S3Exception subclass signalling a failed upload."""
    pass

class S3DownloadError(S3Exception):
    """S3Exception subclass signalling a failed download."""
    pass

class S3RequestError(S3Exception):
    """S3Exception subclass signalling a failure while making a request."""
    pass

class S3ResponseError(S3Exception):
    """S3Exception subclass for bad responses; raised e.g. by
    S3Error.parse_error_xml() on malformed error XML."""
    pass

class InvalidFileError(S3Exception):
    """S3Exception subclass signalling an unusable local file."""
    pass

class ParameterError(S3Exception):
    """S3Exception subclass signalling an invalid parameter value."""
    pass
145 |
146 | # vim:et:ts=4:sts=4:ai
147 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/ExitCodes.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
# patterned on /usr/include/sysexits.h

EX_OK = 0
EX_GENERAL = 1 # generic/catch-all failure
EX_PARTIAL = 2 # some parts of the command succeeded, while others failed
EX_SERVERMOVED = 10 # 301: Moved permanently & 307: Moved temporarily
EX_SERVERERROR = 11 # 400, 405, 411, 416, 417, 501: Bad request, 504: Gateway Time-out
EX_NOTFOUND = 12 # 404: Not found
EX_CONFLICT = 13 # 409: Conflict (ex: bucket error)
EX_PRECONDITION = 14 # 412: Precondition failed
EX_SERVICE = 15 # 503: Service not available or slow down
EX_USAGE = 64 # The command was used incorrectly (e.g. bad command line syntax)
EX_DATAERR = 65 # Failed file transfer, upload or download
EX_SOFTWARE = 70 # internal software error (e.g. S3 error of unknown specificity)
EX_OSERR = 71 # system error (e.g. out of memory)
EX_OSFILE = 72 # OS error (e.g. invalid Python version)
EX_IOERR = 74 # An error occurred while doing I/O on some file.
EX_TEMPFAIL = 75 # temporary failure (S3DownloadError or similar, retry later)
EX_ACCESSDENIED = 77 # Insufficient permissions to perform the operation on S3
EX_CONFIG = 78 # Configuration file error
_EX_SIGNAL = 128 # base for "killed by signal N" exit codes (128 + N)
_EX_SIGINT = 2 # SIGINT's signal number
EX_BREAK = _EX_SIGNAL + _EX_SIGINT # Control-C (KeyboardInterrupt raised)
26 |
class ExitScoreboard(object):
    """Helper to return best return code"""

    def __init__(self):
        # Counters for the three possible outcomes of the processed items.
        self._success = 0
        self._notfound = 0
        self._failed = 0

    def success(self):
        self._success += 1

    def notfound(self):
        self._notfound += 1

    def failed(self):
        self._failed += 1

    def rc(self):
        # Same decision table as before, flattened into guard clauses.
        if self._success:
            if not self._failed and not self._notfound:
                return EX_OK
            if self._failed:
                return EX_PARTIAL
            # successes mixed with not-founds (and no failures) fall through
            return EX_GENERAL
        if self._failed:
            return EX_GENERAL
        if self._notfound:
            return EX_NOTFOUND
        return EX_GENERAL
56 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/FileDict.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import
10 |
11 | import logging
12 | from .SortedDict import SortedDict
13 | from . import Utils
14 | from . import Config
15 |
# md5 of a zero-length file; record_md5() skips entries with this checksum
zero_length_md5 = "d41d8cd98f00b204e9800998ecf8427e"
cfg = Config.Config()  # module-wide config; read by FileDict.get_md5()
18 |
class FileDict(SortedDict):
    """SortedDict of files keyed by relative path, with md5/hardlink caches."""

    def __init__(self, mapping = None, ignore_case = True, **kwargs):
        SortedDict.__init__(self, mapping = mapping or {}, ignore_case = ignore_case, **kwargs)
        # { dev: { inode: md5 } } -- md5 recorded per hardlink identity
        self.hardlinks_md5 = dict()
        # { md5: relative_file } -- first file seen with a given md5
        self.by_md5 = dict()

    def record_md5(self, relative_file, md5):
        # Skip unusable entries: empty names, unknown md5s and the md5
        # shared by every zero-length file.
        if not relative_file or md5 is None or md5 == zero_length_md5:
            return
        self.by_md5.setdefault(md5, relative_file)

    def find_md5_one(self, md5):
        if not md5:
            return None
        return self.by_md5.get(md5)

    def get_md5(self, relative_file):
        """returns md5 if it can, or raises IOError if file is unreadable"""
        entry = self[relative_file]
        if 'md5' in entry:
            return entry['md5']
        md5 = self.get_hardlink_md5(relative_file)
        if md5 is None and 'md5' in cfg.sync_checks:
            logging.debug(u"doing file I/O to read md5 of %s" % relative_file)
            md5 = Utils.hash_file_md5(entry['full_name'])
        self.record_md5(relative_file, md5)
        entry['md5'] = md5
        return md5

    def record_hardlink(self, relative_file, dev, inode, md5, size):
        # Skip unknown md5s, 0-length files, and Windows (dev/inode == 0).
        if md5 is None or size == 0 or dev == 0 or inode == 0:
            return
        self.hardlinks_md5.setdefault(dev, dict()).setdefault(inode, md5)

    def get_hardlink_md5(self, relative_file):
        try:
            entry = self[relative_file]
            return self.hardlinks_md5[entry['dev']][entry['inode']]
        except KeyError:
            return None
75 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/HashCache.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 |
5 | try:
    # cPickle is the faster C implementation on Python 2; Python 3 merged it into pickle
7 | import cPickle as pickle
8 | except ImportError:
9 | import pickle
10 | from .Utils import deunicodise
11 |
class HashCache(object):
    """Pickle-persisted cache of file md5 sums keyed by (dev, inode, mtime)."""

    def __init__(self):
        # { dev: { inode: { mtime: {'md5':..., 'size':..., ['purge': True]} } } }
        self.inodes = dict()

    def add(self, dev, inode, mtime, size, md5):
        """Record md5/size for a (dev, inode, mtime) triple."""
        if dev == 0 or inode == 0: return # Windows reports 0 -- not a usable key
        if dev not in self.inodes:
            self.inodes[dev] = dict()
        if inode not in self.inodes[dev]:
            self.inodes[dev][inode] = dict()
        self.inodes[dev][inode][mtime] = dict(md5=md5, size=size)

    def md5(self, dev, inode, mtime, size):
        """Return the cached md5, or None if absent or the size changed."""
        try:
            d = self.inodes[dev][inode][mtime]
        except KeyError:
            # Fix: narrowed from a bare 'except:' -- only a missing key means
            # "not cached"; anything else should propagate.
            return None
        if d['size'] != size:
            return None
        return d['md5']

    def mark_all_for_purge(self):
        """Flag every cached entry; unmark_for_purge() clears still-valid ones."""
        for d in self.inodes.keys():
            for i in self.inodes[d].keys():
                for c in self.inodes[d][i].keys():
                    self.inodes[d][i][c]['purge'] = True

    def unmark_for_purge(self, dev, inode, mtime, size):
        try:
            d = self.inodes[dev][inode][mtime]
        except KeyError:
            return
        if d['size'] == size and 'purge' in d:
            del self.inodes[dev][inode][mtime]['purge']

    def purge(self):
        """Drop every inode whose entries are still marked for purge."""
        # Fix: iterate over snapshots (list(...)) -- deleting from a dict while
        # iterating its keys() view raises RuntimeError on Python 3.
        for d in list(self.inodes.keys()):
            for i in list(self.inodes[d].keys()):
                for m in self.inodes[d][i]:
                    if 'purge' in self.inodes[d][i][m]:
                        del self.inodes[d][i]
                        break

    def save(self, f):
        """Pickle the cache (with a version tag) to file name 'f'."""
        d = dict(inodes=self.inodes, version=1)
        with open(deunicodise(f), 'wb') as fp:
            pickle.dump(d, fp)

    def load(self, f):
        """Load a cache previously written by save(); ignores unknown versions."""
        with open(deunicodise(f), 'rb') as fp:
            d = pickle.load(fp)
        if d.get('version') == 1 and 'inodes' in d:
            self.inodes = d['inodes']
65 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/MultiPart.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 Multipart upload support
4 | ## Author: Jerome Leclanche
5 | ## License: GPL Version 2
6 |
7 | from __future__ import absolute_import
8 |
9 | import os
10 | import sys
11 | from stat import ST_SIZE
12 | from logging import debug, info, warning, error
13 | from .Utils import getTextFromXml, getTreeFromXml, formatSize, unicodise, deunicodise, calculateChecksum, parseNodes, encode_to_s3
14 |
class MultiPartUpload(object):
    """Drives one S3 multipart upload: initiate, upload parts, complete/abort.

    Works from a named file (size known, parts addressed by offset) or from
    a nameless stream (parts read sequentially until EOF).
    """

    MIN_CHUNK_SIZE_MB = 5 # 5MB
    MAX_CHUNK_SIZE_MB = 5120 # 5GB
    # NOTE(review): 42949672960 bytes is 40 GiB, although the original
    # comment labelled it "5TB" -- confirm the intended limit.
    MAX_FILE_SIZE = 42949672960

    def __init__(self, s3, file_stream, uri, headers_baseline=None):
        self.s3 = s3
        self.file_stream = file_stream
        self.uri = uri
        self.parts = {}  # {part_number: etag}, filled in as parts upload
        self.headers_baseline = headers_baseline or {}
        self.upload_id = self.initiate_multipart_upload()

    def get_parts_information(self, uri, upload_id):
        """Return {part_number: {'checksum':..., 'size':...}} for parts already uploaded."""
        multipart_response = self.s3.list_multipart(uri, upload_id)
        tree = getTreeFromXml(multipart_response['data'])

        parts = dict()
        for elem in parseNodes(tree):
            try:
                parts[int(elem['PartNumber'])] = {'checksum': elem['ETag'], 'size': elem['Size']}
            except KeyError:
                # best-effort: skip nodes without the expected fields
                pass

        return parts

    def get_unique_upload_id(self, uri):
        """Return the single in-progress upload id for this object, or None.

        Raises ValueError when more than one upload exists for the key.
        """
        upload_id = None
        multipart_response = self.s3.get_multipart(uri)
        tree = getTreeFromXml(multipart_response['data'])
        for mpupload in parseNodes(tree):
            try:
                mp_upload_id = mpupload['UploadId']
                mp_path = mpupload['Key']
                info("mp_path: %s, object: %s" % (mp_path, uri.object()))
                if mp_path == uri.object():
                    if upload_id is not None:
                        raise ValueError("More than one UploadId for URI %s. Disable multipart upload, or use\n %s multipart %s\nto list the Ids, then pass a unique --upload-id into the put command." % (uri, sys.argv[0], uri))
                    upload_id = mp_upload_id
            except KeyError:
                pass

        return upload_id

    def initiate_multipart_upload(self):
        """
        Begin a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadInitiate.html
        """
        # Honour an explicit --upload-id, then --continue-put, before
        # initiating a brand new upload on the server.
        if self.s3.config.upload_id is not None:
            self.upload_id = self.s3.config.upload_id
        elif self.s3.config.put_continue:
            self.upload_id = self.get_unique_upload_id(self.uri)
        else:
            self.upload_id = None

        if self.upload_id is None:
            request = self.s3.create_request("OBJECT_POST", uri = self.uri,
                                             headers = self.headers_baseline,
                                             uri_params = {'uploads': None})
            response = self.s3.send_request(request)
            data = response["data"]
            self.upload_id = getTextFromXml(data, "UploadId")

        return self.upload_id

    def upload_all_parts(self, extra_label=''):
        """
        Execute a full multipart upload on a file
        Returns the seq/etag dict
        TODO use num_processes to thread it
        """
        if not self.upload_id:
            raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")

        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
        filename = self.file_stream.stream_name

        if filename != u"":
            size_left = file_size = os.stat(deunicodise(filename))[ST_SIZE]
            nr_parts = file_size // self.chunk_size + (file_size % self.chunk_size and 1)
            debug("MultiPart: Uploading %s in %d parts" % (filename, nr_parts))
        else:
            debug("MultiPart: Uploading from %s" % filename)

        remote_statuses = dict()
        if self.s3.config.put_continue:
            # When continuing, learn which parts the server already has.
            remote_statuses = self.get_parts_information(self.uri, self.upload_id)

        if extra_label:
            extra_label = u' ' + extra_label
        seq = 1
        if filename != u"":
            # Named file: address each chunk by offset.
            while size_left > 0:
                offset = self.chunk_size * (seq - 1)
                current_chunk_size = min(file_size - offset, self.chunk_size)
                size_left -= current_chunk_size
                labels = {
                    'source' : filename,
                    'destination' : self.uri.uri(),
                    'extra' : "[part %d of %d, %s]%s" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True), extra_label)
                }
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses.get(seq))
                except:
                    # Log recovery instructions, then re-raise the original error.
                    error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort the upload, or\n %s --upload-id %s put ...\nto continue the upload."
                          % (filename, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1
        else:
            # Nameless stream: read chunks sequentially until EOF.
            while True:
                buffer = self.file_stream.read(self.chunk_size)
                offset = 0 # send from start of the buffer
                current_chunk_size = len(buffer)
                labels = {
                    'source' : filename,
                    'destination' : self.uri.uri(),
                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
                }
                if len(buffer) == 0: # EOF
                    break
                try:
                    self.upload_part(seq, offset, current_chunk_size, labels, buffer, remote_status = remote_statuses.get(seq))
                except:
                    error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort, or\n %s --upload-id %s put ...\nto continue the upload."
                          % (filename, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
                    raise
                seq += 1

        debug("MultiPart: Upload finished: %d parts", seq - 1)

    def upload_part(self, seq, offset, chunk_size, labels, buffer = '', remote_status = None):
        """
        Upload a file chunk
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadUploadPart.html
        """
        # TODO implement Content-MD5
        debug("Uploading part %i of %r (%s bytes)" % (seq, self.upload_id, chunk_size))

        if remote_status is not None:
            # --continue-put: skip parts whose size and md5 already match.
            if int(remote_status['size']) == chunk_size:
                checksum = calculateChecksum(buffer, self.file_stream, offset, chunk_size, self.s3.config.send_chunk)
                remote_checksum = remote_status['checksum'].strip('"\'')
                if remote_checksum == checksum:
                    warning("MultiPart: size and md5sum match for %s part %d, skipping." % (self.uri, seq))
                    self.parts[seq] = remote_status['checksum']
                    return
                else:
                    warning("MultiPart: checksum (%s vs %s) does not match for %s part %d, reuploading."
                            % (remote_checksum, checksum, self.uri, seq))
            else:
                warning("MultiPart: size (%d vs %d) does not match for %s part %d, reuploading."
                        % (int(remote_status['size']), chunk_size, self.uri, seq))

        headers = { "content-length": str(chunk_size) }
        query_string_params = {'partNumber':'%s' % seq,
                               'uploadId': self.upload_id}
        request = self.s3.create_request("OBJECT_PUT", uri = self.uri,
                                         headers = headers,
                                         uri_params = query_string_params)
        response = self.s3.send_file(request, self.file_stream, labels, buffer, offset = offset, chunk_size = chunk_size)
        self.parts[seq] = response["headers"].get('etag', '').strip('"\'')
        return response

    def complete_multipart_upload(self):
        """
        Finish a multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadComplete.html
        """
        debug("MultiPart: Completing upload: %s" % self.upload_id)

        # Fix: the request body must be valid CompleteMultipartUpload XML;
        # the previous format strings ("%i%s" / "%s") had lost their XML tags,
        # producing a body S3 would reject.
        parts_xml = []
        part_xml = "<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>"
        for seq, etag in self.parts.items():
            parts_xml.append(part_xml % (seq, etag))
        body = "<CompleteMultipartUpload>%s</CompleteMultipartUpload>" % ("".join(parts_xml))

        headers = { "content-length": str(len(body)) }
        request = self.s3.create_request("OBJECT_POST", uri = self.uri,
                                         headers = headers, body = body,
                                         uri_params = {'uploadId': self.upload_id})
        response = self.s3.send_request(request)

        return response

    def abort_upload(self):
        """
        Abort multipart upload
        http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadAbort.html
        """
        debug("MultiPart: Aborting upload: %s" % self.upload_id)
        #request = self.s3.create_request("OBJECT_DELETE", uri = self.uri,
        #                                 uri_params = {'uploadId': self.upload_id})
        #response = self.s3.send_request(request)
        response = None
        return response
212 |
213 | # vim:et:ts=4:sts=4:ai
214 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/PkgInfo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
# Package metadata constants -- presumably consumed by setup.py and the
# CLI version output; TODO confirm against callers.
package = "s3cmd"
version = "2.0.1"
url = "http://s3tools.org"
license = "GNU GPL v2+"
short_description = "Command line tool for managing Amazon S3 and CloudFront services"
long_description = """
S3cmd lets you copy files from/to Amazon S3
(Simple Storage Service) using a simple to use
command line client. Supports rsync-like backup,
GPG encryption, and more. Also supports management
of Amazon's CloudFront content delivery network.
"""
21 |
22 | # vim:et:ts=4:sts=4:ai
23 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/Progress.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, division
10 |
11 | import sys
12 | import datetime
13 | import time
14 | import S3.Utils
15 |
class Progress(object):
    """Plain-text transfer progress reporter (5% milestones on stdout).

    Subclasses (ANSI / CR variants) override display() for nicer output.
    """
    _stdout = sys.stdout
    _last_display = 0

    def __init__(self, labels, total_size):
        # 'labels' is a dict with 'action', 'source', 'destination', 'extra'.
        self._stdout = sys.stdout
        self.new_file(labels, total_size)

    def new_file(self, labels, total_size):
        """Reset counters/timers for a new transfer and print its labels."""
        self.labels = labels
        self.total_size = total_size
        # Set initial_position to something in the
        # case we're not counting from 0. For instance
        # when appending to a partially downloaded file.
        # Setting initial_position will let the speed
        # be computed right.
        self.initial_position = 0
        self.current_position = self.initial_position
        self.time_start = datetime.datetime.now()
        self.time_last = self.time_start
        self.time_current = self.time_start

        self.display(new_file = True)

    def update(self, current_position = -1, delta_position = -1):
        """Advance the position (absolute or relative) and refresh the display."""
        self.time_last = self.time_current
        self.time_current = datetime.datetime.now()
        if current_position > -1:
            self.current_position = current_position
        elif delta_position > -1:
            self.current_position += delta_position
        #else:
        #   no update, just call display()
        self.display()

    def done(self, message):
        self.display(done_message = message)

    def output_labels(self):
        self._stdout.write(u"%(action)s: '%(source)s' -> '%(destination)s' %(extra)s\n" % self.labels)
        self._stdout.flush()

    def _display_needed(self):
        # We only need to update the display every so often.
        if time.time() - self._last_display > 1:
            self._last_display = time.time()
            return True
        return False

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done = False[/True])

        Override this method to provide a nicer output.
        """
        if new_file:
            self.output_labels()
            self.last_milestone = 0
            return

        if self.current_position == self.total_size:
            # Transfer complete: print final size/time/speed summary.
            print_size = S3.Utils.formatSize(self.current_position, True)
            if print_size[1] != "": print_size[1] += "B"
            timedelta = self.time_current - self.time_start
            sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds) / 1000000.0
            print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
            self._stdout.write("100%% %s%s in %.2fs (%.2f %sB/s)\n" %
                               (print_size[0], print_size[1], sec_elapsed, print_speed[0], print_speed[1]))
            self._stdout.flush()
            return

        rel_position = (self.current_position * 100) // self.total_size
        if rel_position >= self.last_milestone:
            # Round down to the last 5% milestone reached.
            self.last_milestone = (rel_position // 5) * 5
            # Fix: file.write() takes a single string -- the original passed
            # the value as a second argument, raising TypeError at runtime.
            self._stdout.write("%d%% " % self.last_milestone)
            self._stdout.flush()
            return
95 |
class ProgressANSI(Progress):
    """Progress display that repaints in place via ANSI cursor-control codes."""
    ## http://en.wikipedia.org/wiki/ANSI_escape_code
    SCI = '\x1b['
    ANSI_hide_cursor = SCI + "?25l"
    ANSI_show_cursor = SCI + "?25h"
    ANSI_save_cursor_pos = SCI + "s"
    ANSI_restore_cursor_pos = SCI + "u"
    ANSI_move_cursor_to_column = SCI + "%uG"
    ANSI_erase_to_eol = SCI + "0K"
    ANSI_erase_current_line = SCI + "2K"

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)
        """
        if new_file:
            # Remember where the progress line starts so it can be repainted.
            self.output_labels()
            self._stdout.write(self.ANSI_save_cursor_pos)
            self._stdout.flush()
            return

        # Throttle repaints unless there is a final message to show.
        if not done_message and not self._display_needed():
            return

        delta = self.time_current - self.time_start
        sec_elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds) / 1000000.0
        if sec_elapsed > 0:
            print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
        else:
            print_speed = (0, "")

        self._stdout.write(self.ANSI_restore_cursor_pos)
        self._stdout.write(self.ANSI_erase_to_eol)
        self._stdout.write("%(current)s of %(total)s %(percent)3d%% in %(elapsed)ds %(speed).2f %(speed_coeff)sB/s" % {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and ((self.current_position * 100) // self.total_size) or 0,
            "elapsed" : sec_elapsed,
            "speed" : print_speed[0],
            "speed_coeff" : print_speed[1]
        })

        if done_message:
            self._stdout.write(" %s\n" % done_message)

        self._stdout.flush()
142 |
class ProgressCR(Progress):
    """Progress display that redraws a single line using Carriage Return."""
    ## Uses CR char (Carriage Return) just like other progress bars do.
    CR_char = chr(13)

    def display(self, new_file = False, done_message = None):
        """
        display(new_file = False[/True], done_message = None)
        """
        if new_file:
            self.output_labels()
            return

        # Throttle repaints unless there is a final message to show.
        if not done_message and not self._display_needed():
            return

        delta = self.time_current - self.time_start
        sec_elapsed = delta.days * 86400 + delta.seconds + float(delta.microseconds) / 1000000.0
        if sec_elapsed > 0:
            print_speed = S3.Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
        else:
            print_speed = (0, "")

        self._stdout.write(self.CR_char)
        output = " %(current)s of %(total)s %(percent)3d%% in %(elapsed)4ds %(speed)7.2f %(speed_coeff)sB/s" % {
            "current" : str(self.current_position).rjust(len(str(self.total_size))),
            "total" : self.total_size,
            "percent" : self.total_size and ((self.current_position * 100) // self.total_size) or 0,
            "elapsed" : sec_elapsed,
            "speed" : print_speed[0],
            "speed_coeff" : print_speed[1]
        }
        self._stdout.write(output)
        if done_message:
            self._stdout.write(" %s\n" % done_message)

        self._stdout.flush()
179 |
class StatsInfo(object):
    """Holding info for stats totals"""

    def __init__(self):
        # All counters start as None; format_output() only reports the
        # sections a caller has filled in.
        self.files = None
        self.size = None
        self.files_transferred = None
        self.size_transferred = None
        self.files_copied = None
        self.size_copied = None
        self.files_deleted = None
        self.size_deleted = None

    def format_output(self):
        """Render the collected totals as "\\nStats: ..." lines (empty if nothing set)."""
        sections = []

        def _add(label, count, size):
            # One "Number of files...: N (S bytes) " section.
            text = u"Number of files%s: %d" % (label, count)
            if size is not None:
                text += " (%d bytes) " % size
            sections.append(text)

        if self.files is not None:
            _add(u"", self.files, self.size)
        if self.files_transferred:
            _add(u" transferred", self.files_transferred, self.size_transferred)
        if self.files_copied:
            _add(u" copied", self.files_copied, self.size_copied)
        if self.files_deleted:
            _add(u" deleted", self.files_deleted, self.size_deleted)

        return u"".join(u"\nStats: " + section for section in sections)
219 |
220 | # vim:et:ts=4:sts=4:ai
221 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/S3Uri.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | import os
12 | import re
13 | import sys
14 | from .Utils import unicodise, deunicodise, check_bucket_name_dns_support
15 | from . import Config
16 |
17 | if sys.version_info >= (3,0):
18 | PY3 = True
19 | else:
20 | PY3 = False
21 |
class S3Uri(object):
    """Factory/base class for URI flavours (s3://, s3fs://, cf://, file://).

    S3Uri(string) returns an instance of whichever subclass accepts the
    string (i.e. whose __init__ does not raise ValueError).
    """
    type = None
    _subclasses = None

    def __new__(self, string):
        if not self._subclasses:
            ## Generate a list of all subclasses of S3Uri
            self._subclasses = []
            dict = sys.modules[__name__].__dict__
            for something in dict:
                if type(dict[something]) is not type(self):
                    continue
                if issubclass(dict[something], self) and dict[something] != self:
                    self._subclasses.append(dict[something])
        # Fix: the dispatch loop must run on EVERY call, not only while the
        # subclass cache is being built -- as dumped it was indented inside
        # the 'if' above, which would break all calls after the first.
        for subclass in self._subclasses:
            try:
                instance = object.__new__(subclass)
                instance.__init__(string)
                return instance
            except ValueError:
                continue
        raise ValueError("%s: not a recognized URI" % string)

    def __str__(self):
        if PY3:
            return self.uri()
        else:
            return deunicodise(self.uri())

    def __unicode__(self):
        return self.uri()

    def __repr__(self):
        return repr("<%s: %s>" % (self.__class__.__name__, self.__unicode__()))

    def public_url(self):
        # Subclasses with a public HTTP representation override this.
        raise ValueError("This S3 URI does not have Anonymous URL representation")

    def basename(self):
        return self.__unicode__().split("/")[-1]
62 |
class S3UriS3(S3Uri):
    """s3://bucket/object URIs."""
    type = "s3"
    _re = re.compile("^s3:///*([^/]*)/?(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        match = self._re.match(string)
        if match is None:
            raise ValueError("%s: not a S3 URI" % string)
        self._bucket, self._object = match.groups()

    def bucket(self):
        return self._bucket

    def object(self):
        return self._object

    def has_bucket(self):
        return bool(self._bucket)

    def has_object(self):
        return bool(self._object)

    def uri(self):
        return u"/".join([u"s3:/", self._bucket, self._object])

    def is_dns_compatible(self):
        return check_bucket_name_dns_support(Config.Config().host_bucket, self._bucket)

    def public_url(self):
        # Virtual-host style URL when the bucket name is DNS compatible,
        # path style otherwise.
        if self.is_dns_compatible():
            return "http://%s.%s/%s" % (self._bucket, Config.Config().host_base, self._object)
        return "http://%s/%s/%s" % (Config.Config().host_base, self._bucket, self._object)

    def host_name(self):
        if self.is_dns_compatible():
            return "%s.s3.amazonaws.com" % (self._bucket)
        return "s3.amazonaws.com"

    @staticmethod
    def compose_uri(bucket, object = ""):
        return u"s3://%s/%s" % (bucket, object)

    @staticmethod
    def httpurl_to_s3uri(http_url):
        """Translate an HTTP(S) S3 endpoint URL into an s3:// S3Uri."""
        m = re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE | re.UNICODE)
        hostname, object = m.groups()[1:]
        hostname = hostname.lower()
        if hostname == "s3.amazonaws.com":
            ## old-style url: http://s3.amazonaws.com/bucket/object
            if "/" not in object:
                ## no object given
                bucket, object = object, ""
            else:
                ## bucket/object
                bucket, object = object.split("/", 1)
        elif hostname.endswith(".s3.amazonaws.com"):
            ## new-style url: http://bucket.s3.amazonaws.com/object
            bucket = hostname[:-(len(".s3.amazonaws.com"))]
        else:
            raise ValueError("Unable to parse URL: %s" % http_url)
        return S3Uri(u"s3://%(bucket)s/%(object)s" % {
            'bucket' : bucket,
            'object' : object })
130 |
class S3UriS3FS(S3Uri):
    """s3fs://filesystem/path URIs."""
    type = "s3fs"
    _re = re.compile("^s3fs:///*([^/]*)/?(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        match = self._re.match(string)
        if match is None:
            raise ValueError("%s: not a S3fs URI" % string)
        self._fsname, remainder = match.groups()
        self._path = remainder.split("/")

    def fsname(self):
        return self._fsname

    def path(self):
        return "/".join(self._path)

    def uri(self):
        return u"/".join([u"s3fs:/", self._fsname, self.path()])
150 |
class S3UriFile(S3Uri):
    """Local paths, optionally written as file:// URIs."""
    type = "file"
    _re = re.compile("^(\w+://)?(.*)", re.UNICODE)

    def __init__(self, string):
        scheme, remainder = self._re.match(string).groups()
        if scheme not in (None, "file://"):
            raise ValueError("%s: not a file:// URI" % string)
        # Bare paths use the OS separator; explicit file:// URIs use "/".
        separator = os.sep if scheme is None else "/"
        self._path = remainder.split(separator)

    def path(self):
        return os.sep.join(self._path)

    def uri(self):
        return u"/".join([u"file:/"] + self._path)

    def isdir(self):
        return os.path.isdir(deunicodise(self.path()))

    def dirname(self):
        return unicodise(os.path.dirname(deunicodise(self.path())))

    def basename(self):
        return unicodise(os.path.basename(deunicodise(self.path())))
178 |
class S3UriCloudFront(S3Uri):
    """cf://distribution-id[/request-id] URIs."""
    type = "cf"
    _re = re.compile("^cf://([^/]*)/*(.*)", re.IGNORECASE | re.UNICODE)

    def __init__(self, string):
        match = self._re.match(string)
        if match is None:
            raise ValueError("%s: not a CloudFront URI" % string)
        dist_id, request_id = match.groups()
        self._dist_id = dist_id
        # Empty or "/" request part means "no request id".
        self._request_id = request_id if (request_id and request_id != "/") else None

    def dist_id(self):
        return self._dist_id

    def request_id(self):
        return self._request_id

    def uri(self):
        parts = [u"cf://" + self.dist_id()]
        if self.request_id():
            parts.append(self.request_id())
        return u"/".join(parts)
201 |
if __name__ == "__main__":
    # Ad-hoc smoke test: exercise the S3Uri factory with each URI flavour.
    uri = S3Uri("s3://bucket/object")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("bucket =", uri.bucket())
    print("object =", uri.object())
    print()

    # Bucket-only s3:// URI (no object key).
    uri = S3Uri("s3://bucket")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("bucket =", uri.bucket())
    print()

    # s3fs:// URI with a filesystem name and nested path.
    uri = S3Uri("s3fs://filesystem1/path/to/remote/file.txt")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("path =", uri.path())
    print()

    # Plain local path (no scheme) falls through to the file type.
    uri = S3Uri("/path/to/local/file.txt")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("path =", uri.path())
    print()

    # CloudFront distribution URI with no request id.
    uri = S3Uri("cf://1234567890ABCD/")
    print("type() =", type(uri))
    print("uri =", uri)
    print("uri.type=", uri.type)
    print("dist_id =", uri.dist_id())
    print()
238 |
239 | # vim:et:ts=4:sts=4:ai
240 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/SortedDict.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | ## Amazon S3 manager
4 | ## Author: Michal Ludvig
5 | ## http://www.logix.cz/michal
6 | ## License: GPL Version 2
7 | ## Copyright: TGRMN Software and contributors
8 |
9 | from __future__ import absolute_import, print_function
10 |
11 | from .BidirMap import BidirMap
12 |
class SortedDictIterator(object):
    """Iterates over a pre-sorted key list, consuming it as it goes."""

    def __init__(self, sorted_dict, keys):
        self.sorted_dict = sorted_dict
        self.keys = keys

    def __next__(self):
        # An exhausted key list marks the end of iteration.
        if not self.keys:
            raise StopIteration
        return self.keys.pop(0)

    # Python 2 compatibility alias.
    next = __next__
25 |
class SortedDict(dict):
    def __init__(self, mapping=None, ignore_case=True, **kwargs):
        """
        WARNING: SortedDict() with ignore_case==True will
        drop entries differing only in capitalisation!
        Eg: SortedDict({'auckland':1, 'Auckland':2}).keys() => ['Auckland']
        With ignore_case==False it's all right
        """
        # 'mapping=None' avoids the shared-mutable-default-argument pitfall
        # (the old 'mapping = {}' default was one dict shared by all calls).
        dict.__init__(self, {} if mapping is None else mapping, **kwargs)
        self.ignore_case = ignore_case

    def keys(self):
        """Return all keys sorted; case-insensitively when ignore_case is set.

        Returns a list (not a view), already in sorted order.
        """
        keys = dict.keys(self)
        if self.ignore_case:
            # Translation map from lowercased key back to the original key.
            xlat_map = BidirMap()
            for key in keys:
                xlat_map[key.lower()] = key
            # Sort the lowercase keys, then translate back.
            lc_keys = sorted(xlat_map.keys())
            return [xlat_map[k] for k in lc_keys]
        else:
            return sorted(keys)

    def __iter__(self):
        # Iteration follows the sorted key order defined by keys().
        return SortedDictIterator(self, self.keys())

    def __getitem__(self, index):
        """Override to support the "get_slice" for python3 """
        if isinstance(index, slice):
            # A slice selects a contiguous run of the sorted keys.
            r = SortedDict(ignore_case = self.ignore_case)
            for k in self.keys()[index]:
                r[k] = self[k]
        else:
            r = super(SortedDict, self).__getitem__(index)
        return r
66 |
67 |
if __name__ == "__main__":
    # Self-test: case-insensitive vs case-sensitive key ordering.
    d = { 'AWS' : 1, 'Action' : 2, 'america' : 3, 'Auckland' : 4, 'America' : 5 }
    sd = SortedDict(d)
    print("Wanted: Action, america, Auckland, AWS, [ignore case]")
    print("Got: ", end=' ')
    for key in sd:
        print("%s," % key, end=' ')
    print(" [used: __iter__()]")
    # Same data, but preserving case distinctions.
    d = SortedDict(d, ignore_case = False)
    print("Wanted: AWS, Action, America, Auckland, america, [case sensitive]")
    print("Got: ", end=' ')
    for key in d.keys():
        print("%s," % key, end=' ')
    print(" [used: keys()]")
82 |
83 | # vim:et:ts=4:sts=4:ai
84 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/build/lib/S3/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/dist/s3cmd-2.0.1-py3.6.egg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/s3cmd-2.0.1/dist/s3cmd-2.0.1-py3.6.egg
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/s3cmd.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 1.2
2 | Name: s3cmd
3 | Version: 2.0.1
4 | Summary: Command line tool for managing Amazon S3 and CloudFront services
5 | Home-page: http://s3tools.org
6 | Author: Michal Ludvig
7 | Author-email: michal@logix.cz
8 | Maintainer: github.com/mdomsch, github.com/matteobar, github.com/fviard
9 | Maintainer-email: s3tools-bugs@lists.sourceforge.net
10 | License: GNU GPL v2+
11 | Description:
12 |
13 | S3cmd lets you copy files from/to Amazon S3
14 | (Simple Storage Service) using a simple to use
15 | command line client. Supports rsync-like backup,
16 | GPG encryption, and more. Also supports management
17 | of Amazon's CloudFront content delivery network.
18 |
19 |
20 | Authors:
21 | --------
22 | Michal Ludvig
23 |
24 | Platform: UNKNOWN
25 | Classifier: Development Status :: 5 - Production/Stable
26 | Classifier: Environment :: Console
27 | Classifier: Environment :: MacOS X
28 | Classifier: Environment :: Win32 (MS Windows)
29 | Classifier: Intended Audience :: End Users/Desktop
30 | Classifier: Intended Audience :: System Administrators
31 | Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
32 | Classifier: Natural Language :: English
33 | Classifier: Operating System :: MacOS :: MacOS X
34 | Classifier: Operating System :: Microsoft :: Windows
35 | Classifier: Operating System :: POSIX
36 | Classifier: Operating System :: Unix
37 | Classifier: Programming Language :: Python :: 2
38 | Classifier: Programming Language :: Python :: 2.6
39 | Classifier: Programming Language :: Python :: 2.7
40 | Classifier: Programming Language :: Python :: 3
41 | Classifier: Programming Language :: Python :: 3.3
42 | Classifier: Programming Language :: Python :: 3.4
43 | Classifier: Programming Language :: Python :: 3.5
44 | Classifier: Programming Language :: Python :: 3.6
45 | Classifier: Topic :: System :: Archiving
46 | Classifier: Topic :: Utilities
47 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/s3cmd.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | INSTALL
2 | LICENSE
3 | MANIFEST.in
4 | NEWS
5 | README.md
6 | s3cmd
7 | s3cmd.1
8 | setup.cfg
9 | setup.py
10 | S3/ACL.py
11 | S3/AccessLog.py
12 | S3/BidirMap.py
13 | S3/CloudFront.py
14 | S3/Config.py
15 | S3/ConnMan.py
16 | S3/Crypto.py
17 | S3/Custom_httplib27.py
18 | S3/Custom_httplib3x.py
19 | S3/Exceptions.py
20 | S3/ExitCodes.py
21 | S3/FileDict.py
22 | S3/FileLists.py
23 | S3/HashCache.py
24 | S3/MultiPart.py
25 | S3/PkgInfo.py
26 | S3/Progress.py
27 | S3/S3.py
28 | S3/S3Uri.py
29 | S3/SortedDict.py
30 | S3/Utils.py
31 | S3/__init__.py
32 | s3cmd.egg-info/PKG-INFO
33 | s3cmd.egg-info/SOURCES.txt
34 | s3cmd.egg-info/dependency_links.txt
35 | s3cmd.egg-info/requires.txt
36 | s3cmd.egg-info/top_level.txt
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/s3cmd.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/s3cmd.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | python-dateutil
2 | python-magic
3 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/s3cmd.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | S3
2 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/setup.cfg:
--------------------------------------------------------------------------------
1 | [sdist]
2 | formats = gztar,zip
3 |
4 | [egg_info]
5 | tag_build =
6 | tag_date = 0
7 |
8 |
--------------------------------------------------------------------------------
/dashboard-ui/s3cmd-2.0.1/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # -*- coding=utf-8 -*-
3 |
4 | from __future__ import print_function
5 |
6 | import sys
7 | import os
8 |
9 | try:
10 | import xml.etree.ElementTree
11 | print("Using xml.etree.ElementTree for XML processing")
12 | except ImportError as e:
13 | sys.stderr.write(str(e) + "\n")
14 | try:
15 | import elementtree.ElementTree
16 | print("Using elementtree.ElementTree for XML processing")
17 | except ImportError as e:
18 | sys.stderr.write(str(e) + "\n")
19 | sys.stderr.write("Please install ElementTree module from\n")
20 | sys.stderr.write("http://effbot.org/zone/element-index.htm\n")
21 | sys.exit(1)
22 |
23 | from setuptools import setup
24 |
25 | import S3.PkgInfo
26 |
27 | if float("%d.%d" % sys.version_info[:2]) < 2.6:
28 | sys.stderr.write("Your Python version %d.%d.%d is not supported.\n" % sys.version_info[:3])
29 | sys.stderr.write("S3cmd requires Python 2.6 or newer.\n")
30 | sys.exit(1)
31 |
32 | ## Remove 'MANIFEST' file to force
33 | ## distutils to recreate it.
34 | ## Only in "sdist" stage. Otherwise
35 | ## it makes life difficult to packagers.
36 | if len(sys.argv) > 1 and sys.argv[1] == "sdist":
37 | try:
38 | os.unlink("MANIFEST")
39 | except OSError as e:
40 | pass
41 |
42 | ## Re-create the manpage
43 | ## (Beware! Perl script on the loose!!)
44 | if len(sys.argv) > 1 and sys.argv[1] == "sdist":
45 | if os.stat_result(os.stat("s3cmd.1")).st_mtime < os.stat_result(os.stat("s3cmd")).st_mtime:
46 | sys.stderr.write("Re-create man page first!\n")
47 | sys.stderr.write("Run: ./s3cmd --help | ./format-manpage.pl > s3cmd.1\n")
48 | sys.exit(1)
49 |
50 | ## Don't install manpages and docs when $S3CMD_PACKAGING is set
51 | ## This was a requirement of Debian package maintainer.
52 | if not os.getenv("S3CMD_PACKAGING"):
53 | man_path = os.getenv("S3CMD_INSTPATH_MAN") or "share/man"
54 | doc_path = os.getenv("S3CMD_INSTPATH_DOC") or "share/doc/packages"
55 | data_files = [
56 | (doc_path+"/s3cmd", [ "README.md", "INSTALL", "LICENSE", "NEWS" ]),
57 | (man_path+"/man1", [ "s3cmd.1" ] ),
58 | ]
59 | else:
60 | data_files = None
61 |
62 | ## Main distutils info
63 | setup(
64 | ## Content description
65 | name = S3.PkgInfo.package,
66 | version = S3.PkgInfo.version,
67 | packages = [ 'S3' ],
68 | scripts = ['s3cmd'],
69 | data_files = data_files,
70 |
71 | ## Packaging details
72 | author = "Michal Ludvig",
73 | author_email = "michal@logix.cz",
74 | maintainer = "github.com/mdomsch, github.com/matteobar, github.com/fviard",
75 | maintainer_email = "s3tools-bugs@lists.sourceforge.net",
76 | url = S3.PkgInfo.url,
77 | license = S3.PkgInfo.license,
78 | description = S3.PkgInfo.short_description,
79 | long_description = """
80 | %s
81 |
82 | Authors:
83 | --------
84 | Michal Ludvig
85 | """ % (S3.PkgInfo.long_description),
86 |
87 | classifiers = [
88 | 'Development Status :: 5 - Production/Stable',
89 | 'Environment :: Console',
90 | 'Environment :: MacOS X',
91 | 'Environment :: Win32 (MS Windows)',
92 | 'Intended Audience :: End Users/Desktop',
93 | 'Intended Audience :: System Administrators',
94 | 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
95 | 'Natural Language :: English',
96 | 'Operating System :: MacOS :: MacOS X',
97 | 'Operating System :: Microsoft :: Windows',
98 | 'Operating System :: POSIX',
99 | 'Operating System :: Unix',
100 | 'Programming Language :: Python :: 2',
101 | 'Programming Language :: Python :: 2.6',
102 | 'Programming Language :: Python :: 2.7',
103 | 'Programming Language :: Python :: 3',
104 | 'Programming Language :: Python :: 3.3',
105 | 'Programming Language :: Python :: 3.4',
106 | 'Programming Language :: Python :: 3.5',
107 | 'Programming Language :: Python :: 3.6',
108 | 'Topic :: System :: Archiving',
109 | 'Topic :: Utilities',
110 | ],
111 |
112 | install_requires = ["python-dateutil", "python-magic"]
113 | )
114 |
115 | # vim:et:ts=4:sts=4:ai
116 |
--------------------------------------------------------------------------------
/dashboard-ui/src/actions/AuthActions.js:
--------------------------------------------------------------------------------
import Reflux from 'reflux';

/**
 * Reflux actions for authentication: login/registration each get
 * 'completed'/'failed' child actions; logout is a plain action.
 * `const` instead of `var`: the binding is never reassigned.
 */
const AuthActions = Reflux.createActions({
  login: {children: ['completed', 'failed']},
  registration: {children: ['completed', 'failed']},
  logout: {}
});

export default AuthActions;
10 |
--------------------------------------------------------------------------------
/dashboard-ui/src/actions/CoordinatorActions.js:
--------------------------------------------------------------------------------
import Reflux from 'reflux';

/**
 * Reflux async action for polling the coordinator status.
 * `const` instead of `var`: the binding is never reassigned.
 */
const CoordinatorActions = Reflux.createActions({
  fetchCoordinatorStatus: {children: ['completed', 'failed'], asyncResult: true},
});

export default CoordinatorActions;
8 |
--------------------------------------------------------------------------------
/dashboard-ui/src/actions/DashboardActions.js:
--------------------------------------------------------------------------------
import Reflux from 'reflux';

/**
 * Reflux async action for loading every repo shown on the dashboard.
 * `const` instead of `var`: the binding is never reassigned.
 */
const DashboardActions = Reflux.createActions({
  fetchAllRepos: {children: ['completed', 'failed'], asyncResult: true}
});

export default DashboardActions;
8 |
--------------------------------------------------------------------------------
/dashboard-ui/src/actions/RepoDataActions.js:
--------------------------------------------------------------------------------
import Reflux from 'reflux';

/**
 * Reflux actions for repo CRUD: create a repo, fetch one repo's data,
 * query the remaining repo quota, and reset the creation-flow state.
 * `const` instead of `var`: the binding is never reassigned.
 */
const RepoDataActions = Reflux.createActions({
  createNewRepo: {children: ['completed', 'failed'], asyncResult: true},
  fetchRepoData: {children: ['completed', 'failed'], asyncResult: true},
  fetchReposRemaining: {children: ['completed', 'failed'], asyncResult: true},
  resetState: {},
});

export default RepoDataActions;
11 |
--------------------------------------------------------------------------------
/dashboard-ui/src/actions/RepoLogsActions.js:
--------------------------------------------------------------------------------
import Reflux from 'reflux';

/**
 * Reflux async action for loading a repo's training logs.
 * `const` instead of `var`: the binding is never reassigned.
 */
const RepoLogsActions = Reflux.createActions({
  fetchRepoLogs: {children: ['completed', 'failed'], asyncResult: true},
});

export default RepoLogsActions;
8 |
--------------------------------------------------------------------------------
/dashboard-ui/src/actions/initializeActions.js:
--------------------------------------------------------------------------------
import Dispatcher from '../dispatcher/appDispatcher';
import ActionTypes from '../constants/actionTypes';

/**
 * Legacy Flux-style bootstrap action: dispatches INITIALIZE with an
 * (currently empty) initial data payload.
 * `const` instead of `var`: the binding is never reassigned.
 */
const InitializeActions = {
  initApp: function() {
    Dispatcher.dispatch({
      actionType: ActionTypes.INITIALIZE,
      initialData: {
      }
    });
  }
};

export default InitializeActions;
18 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/app.js:
--------------------------------------------------------------------------------
1 | /*eslint-disable strict */ // Disabling check because we can't run strict mode. Need global vars.
2 | import React, { Component } from 'react'
3 | import Header from './common/header';
4 | import Routes from './../routes';
5 |
6 | import './common/app.css';
7 |
8 | class App extends Component {
9 | render() {
10 | return (
11 |
17 | );
18 | }
19 | }
20 |
21 | export default App;
22 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/common/app.css:
--------------------------------------------------------------------------------
/* Global page defaults: light-grey background, no default margins. */
body {
  margin: 0;
  padding: 0;
  font-family: sans-serif;
  background-color: #f8f9fa !important;
}

/* Vertical spacing utilities. */
.margin-top-sm {
  margin-top: 20px;
}

.margin-top {
  margin-top: 40px;
}

.margin-bottom {
  margin-bottom: 40px;
}

/* Square outline button: white border on a transparent background. */
.btn-transparent {
  color: #fff !important;
  background-color: #007bff00;
  border-color: #fff !important;
  border-radius: 0px !important;
}

.btn-transparent:hover {
  color: #dadada;
  background-color: #00000038;
  border-color: #ffffffc2 !important;
}

/* Dark variant of the outline button. */
.btn-dark-alt {
  color: #000 !important;
  background-color: #007bff00;
  border-color: #000 !important;
  border-radius: 0px !important;
}

.btn-dark-alt:hover {
  opacity: 0.75 !important;
}

/* Red "danger" button; slightly translucent until hovered. */
.btn-red-alt {
  color: white !important;
  background-color: red;
  border-color: red !important;
  border-radius: 0px !important;
  opacity: 0.75 !important;
}

.btn-red-alt:hover {
  opacity: 1 !important;
}

/* Extra-small button size (below Bootstrap's btn-sm). */
.btn-xs {
  padding: 0.25rem 0.5rem !important;
  font-size: 0.7125rem !important;
  line-height: 1.25 !important;
  border-radius: 0.15rem !important;
}

.text-black {
  color: black;
}

/* Brand palette overrides for Bootstrap badge/button/text colours. */
.badge-success {
  background-color: #62ca7a !important;
}

.text-success {
  color: #62ca7a !important;
}

.badge-primary, .btn-primary.disabled, .btn-primary:disabled {
  color: #fff !important;
  background-color: #304ffe !important;
  border-color: #304ffe !important;
}

.text-primary {
  color: #304ffe !important;
}
84 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/common/header.css:
--------------------------------------------------------------------------------
/* Logo image sizing inside the navbar. */
.header-logo {
  height: 80px;
}

/* Teal-to-purple gradient navbar, slightly translucent. */
.navbar {
  background: #1cc4c4;
  background: linear-gradient(90deg,#1cc4c4 0%,#520ab4 100%);
  opacity: 0.95;
}

/* Nav links: white at 75% opacity, brightening on hover. */
.navbar-dark .navbar-nav .nav-link {
  color: hsla(0,0%,100%,.75) !important;
}

.navbar-dark .navbar-nav .nav-link:hover {
  color: hsla(0,0%,100%,.90) !important;
}
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import Reflux from 'reflux';
3 | import { Link } from 'react-router-dom';
4 | import AuthStore from './../../stores/AuthStore';
5 | import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
6 |
7 |
8 | import logo from './white-logo.png';
9 | import './header.css';
10 |
11 | class Header extends Reflux.Component {
12 | constructor(props) {
13 | super(props);
14 | this.store = AuthStore;
15 | }
16 |
17 | render() {
18 | var rightElement;
19 | if (this.state.isAuthenticated) {
20 | let companyName = this.state.claims["company"];
21 |
22 | rightElement = (
23 |
24 | -
25 | New repo
26 |
27 | -
28 |
{"@" + companyName}
29 |
30 | -
31 |
32 |
33 |
34 | );
35 | } else {
36 | rightElement = (
37 |
38 | -
39 | Sign In
40 |
41 |
42 | );
43 | }
44 |
45 | return (
46 |
61 | );
62 | }
63 | }
64 |
65 | export default Header;
66 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/common/white-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/src/components/common/white-logo.png
--------------------------------------------------------------------------------
/dashboard-ui/src/components/common/white-logo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiscreetAI/dashboard-ui/371291831f6757ab06e8b45f9ec9260982560679/dashboard-ui/src/components/common/white-logo2.png
--------------------------------------------------------------------------------
/dashboard-ui/src/components/dashboard.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import RepoList from './dashboard/repoList';
3 |
4 |
5 | class Dashboard extends Component {
6 | render() {
7 | return (
8 |
9 |
10 |
11 | );
12 | }
13 | }
14 |
15 | export default Dashboard;
16 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/dashboard/repoList.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import Reflux from 'reflux';
3 | import { Link, withRouter } from "react-router-dom";
4 |
5 | import RepoStatus from './../repo/repoStatus';
6 |
7 | import DashboardStore from './../../stores/DashboardStore';
8 | import DashboardActions from './../../actions/DashboardActions';
9 |
10 | import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
11 |
12 | var username = null;
13 | var repo_id = null;
14 |
15 | class RepoList extends Reflux.Component {
16 |
17 | constructor(props) {
18 | super(props);
19 | this.store = DashboardStore;
20 | }
21 |
22 | componentDidMount() {
23 | DashboardActions.fetchAllRepos();
24 | }
25 |
26 | render() {
27 | if (this.state.error !== false) {
28 | return Error: {this.state.error}
29 | }
30 |
31 | if (this.state.loading === true) {
32 | return (
33 |
34 |
35 |
36 | );
37 | }
38 |
39 | if (this.state.repos.length === 0) {
40 | return (
41 |
42 |
You don't own any repos yet.
43 |
44 | Start by creating a new repo.
45 |
46 |
47 | )
48 | } else {
49 | var deleteRepo = this.deleteRepo
50 | return (
51 |
52 |
53 | {this.state.repos.map(function(repo, index) {
54 | let createdLessThan10MinutesAgo = Math.floor(Date.now()/1000) < (repo.CreatedAt + 60*10);
55 | repo_id = repo;
56 | return (
57 |
58 |
59 |
60 |
{repo.Name}
61 |
62 |
63 |
64 |
65 | )
66 | })}
67 |
68 |
69 | )
70 | }
71 | }
72 | }
73 |
74 | export default withRouter(RepoList);
75 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/homePage.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import { Redirect } from 'react-router-dom';
3 |
4 | class Home extends Component {
5 | render() {
6 | return (
7 |
8 | );
9 | }
10 | }
11 |
12 | export default Home;
13 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/newRepo.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import Reflux from 'reflux';
3 | import ReactDOM from 'react-dom';
4 | import { Link } from 'react-router-dom';
5 |
6 | import RepoDataStore from './../stores/RepoDataStore';
7 | import RepoDataActions from './../actions/RepoDataActions';
8 |
9 | import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
10 |
11 |
12 | class NewRepo extends Reflux.Component {
13 |
14 | constructor(props) {
15 | super(props);
16 | this.store = RepoDataStore;
17 | }
18 |
19 | componentDidMount() {
20 | RepoDataActions.fetchReposRemaining();
21 | }
22 |
23 | _handleSubmit(event) {
24 | event.preventDefault();
25 | let repoName = ReactDOM.findDOMNode(this.refs.repoName).value.replace(/[^a-zA-Z0-9-]/g,'-');
26 | document.getElementById("wait").hidden = false;
27 | RepoDataActions.createNewRepo(
28 | repoName,
29 | ReactDOM.findDOMNode(this.refs.repoDescription).value
30 | );
31 | }
32 |
33 | _handleContinue(event) {
34 | event.preventDefault();
35 | RepoDataActions.resetState();
36 | this.props.history.push("/repo/" + this.state.creationState.repoId);
37 | }
38 |
39 | render() {
40 | // Get number of repos left.
41 | if (this.state.creationState.loading === true) {
42 | return (
43 |
44 |
45 |
46 | );
47 | }
48 |
49 | if (this.state.creationState.created) {
50 | return (
51 |
52 |
53 |
54 |
55 |
Important!
56 |
The new repo was created and your API Key is shown below.
57 |
Please save it!
58 |
You won't be able to access it again and you will need it to hook up clients to this repo.
59 |
60 |
{this.state.creationState.apiKey}
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 | )
70 | }
71 |
72 | let reposLeft = this.state.creationState.reposRemaining;
73 | if (!reposLeft) {
74 | return (
75 |
76 |
Sorry, but you have no more repos left.
77 |
If you want to upgrade your account to support more repos, please email us.
78 |
Back to dashboard
79 |
80 | );
81 | } else {
82 | return (
83 |
84 |
85 |
86 |
Create a new repo
87 |
A repo is a link to a network of devices, history of training, and resulting models.
88 |
Create a new repository to start doing private federated learning. Repos are private by default.
89 |
105 |
106 |
107 | );
108 | }
109 | }
110 | }
111 |
112 | export default NewRepo;
113 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/notFoundPage.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import { Link } from 'react-router-dom';
3 |
4 | class NotFoundPage extends Component {
5 | render() {
6 | return (
7 |
8 |
Page Not Found
9 |
Whoops! Sorry, there is nothing to see here.
10 |
Back to Home
11 |
12 | );
13 | }
14 | }
15 |
16 | export default NotFoundPage;
17 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/repo.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import Reflux from 'reflux';
3 |
4 | import NotFoundPage from './notFoundPage';
5 | import RepoStatus from './repo/repoStatus';
6 | import RepoLogs from './repo/repoLogs';
7 | import RepoModels from './repo/repoModels';
8 | import { Link } from 'react-router-dom';
9 |
10 | import RepoDataStore from './../stores/RepoDataStore';
11 | import RepoDataActions from './../actions/RepoDataActions';
12 |
13 | import RepoLogsStore from './../stores/RepoLogsStore';
14 | import RepoLogsActions from './../actions/RepoLogsActions';
15 |
16 | import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
17 |
18 | import Endpoints from './../constants/endpoints.js';
19 | import AuthStore from './../stores/AuthStore';
20 |
21 | var username = null;
22 | var repo_id = null;
23 |
24 | class Repo extends Reflux.Component {
  /**
   * Wires up the repo-data and repo-logs stores and caches the repoId
   * from the route params (also into the module-level repo_id variable
   * that the fetch helpers below read).
   */
  constructor(props) {
    super(props);
    this.stores = [RepoDataStore, RepoLogsStore];

    const { match: { params } } = this.props;
    this.repoId = params.repoId;
    // Module-level copy used by componentDidMount/resetState/deleteRepo.
    repo_id = this.repoId
    console.log("DONE");

  }
35 |
36 | async componentDidMount() {
37 | const { match: { params } } = this.props;
38 | const repoId = params.repoId;
39 |
40 | if (AuthStore.state.isAuthenticated) {
41 | let jwtString = AuthStore.state.jwt;
42 | console.log("component", jwtString)
43 | fetch(
44 | Endpoints["dashboardGetExploraURL"] + repo_id, {
45 | method: 'POST',
46 | dataType:'json',
47 | headers: {
48 | 'Content-Type':'application/json',
49 | 'Accept': 'application/json',
50 | },
51 | body: JSON.stringify({
52 | 'token': jwtString
53 | })
54 | }
55 | )
56 | .then(r => r.json())
57 | .then(r => {
58 | console.log(r)
59 | username = r["message"];
60 | console.log(username);
61 | var count = 100;
62 | document.getElementById("username").innerHTML = username
63 | });
64 | }
65 | RepoDataActions.fetchRepoData(repoId);
66 | RepoLogsActions.fetchRepoLogs(repoId);
67 |
68 | }
69 |
  /**
   * Asks the dashboard API to reset this repo's cloud node.
   * Only runs when a session exists; the JWT is sent in the POST body.
   */
  resetState() {
    if (AuthStore.state.isAuthenticated) {
      let jwtString = AuthStore.state.jwt;
      console.log(jwtString)
      fetch(
        Endpoints["dashboardResetCloudNode"] + repo_id, {
          method: 'POST',
          dataType:'json',
          headers: {
            'Content-Type':'application/json',
            'Accept': 'application/json',
          },
          body: JSON.stringify({
            'token': jwtString
          })
        }
      )
      .then(r => r.json())
      .then(r => {
        // Response is only logged; no UI state depends on it.
        console.log(r)
      });
    }
  }
93 |
94 | deleteRepo() {
95 | if (AuthStore.state.isAuthenticated) {
96 | let jwtString = AuthStore.state.jwt;
97 | console.log("delete", jwtString)
98 | fetch(
99 | Endpoints["dashboardDeleteRepo"] + repo_id, {
100 | method: 'POST',
101 | dataType:'json',
102 | headers: {
103 | 'Content-Type':'application/json',
104 | 'Accept': 'application/json',
105 | },
106 | body: JSON.stringify({
107 | 'token': jwtString
108 | })
109 | }
110 | )
111 | .then(r => r.json())
112 | .then(r => {
113 | console.log(r)
114 | if (r["Error"] == false)
115 | window.location.href = '/dashboard';
116 | });
117 | }
118 | }
119 |
120 | render() {
121 | // if (this.state.error !== false) {
122 | // return (
123 | //
124 | // Error: {this.state.error}
125 | //
126 | // );
127 | // }
128 |
129 | if (this.state.loading === true) {
130 | return (
131 |
132 |
133 |
134 | );
135 | }
136 |
137 | if (!this.state.repoWasFound) {
138 | return
139 | }
140 | let createdLessThan10MinutesAgo = Math.floor(Date.now()/1000) < (this.state.repoData.CreatedAt + 60*10);
141 | return (
142 |
143 |
144 |
145 |
146 |
{this.state.repoData.Name}
147 |
{this.state.repoData.Description}
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
Click here to use Explora and start your session.
159 |
Sign in with the username {username} and leave the password blank.
160 |
Open a new terminal, and clone this GitHub repo. Open the folder and open the notebook called Explora.ipynb
161 |
Your repo ID is: {this.state.repoData.Id} .
162 |
163 |
164 |
165 |
166 |
167 |
168 | )
169 | }
170 | }
171 |
172 | export default Repo;
173 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/repo/repoLogs.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import { Link } from 'react-router-dom';
3 |
4 | class RepoLogs extends Component {
5 |
6 | render() {
7 | let content;
8 | if (this.props.logs.length === 0) {
9 | content = (
10 |
11 |
No model has been trained yet.
12 |
Train a new model
13 |
14 | );
15 | } else {
16 | content = (
17 |
18 |
19 |
20 |
21 | SessionId |
22 | Time |
23 | Action |
24 | Log Content |
25 |
26 |
27 |
28 |
29 | {this.props.logs.map((log, index) => {
30 | return
31 | {log.SessionId} |
32 | {this._formatTime(log.Timestamp)} |
33 | {log.ContentType} |
34 | {log.Content} |
35 |
36 | })}
37 |
38 |
39 | );
40 | }
41 |
42 | return (
43 |
44 |
45 |
46 |
47 |
48 |
Logs
49 |
History of training sessions for this repo. You can debug your environment from here.
50 |
51 |
52 | {content}
53 |
54 |
55 |
56 |
57 | )
58 | }
59 |
60 | _formatTime(timestamp) {
61 | var t = new Date(timestamp * 1000);
62 | return t.toISOString();
63 | }
64 | }
65 |
66 | export default RepoLogs;
67 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/repo/repoModels.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import { Link } from 'react-router-dom';
3 | import AuthStore from './../../stores/AuthStore';
4 | import Endpoints from './../../constants/endpoints.js';
5 |
6 |
7 | class RepoModels extends Component {
8 |
  /**
   * Requests a pre-signed download URL for the model produced at the
   * round/session parsed out of the given log entry, then opens it in a
   * new tab. Authorization uses the JWT from AuthStore as a Bearer token.
   */
  _downloadModel(log) {
    let round = JSON.parse(log.Content)["round"];
    let session_id = JSON.parse(log.Content)["session_id"];
    let repo_id = log.RepoId;

    let jwtString = AuthStore.state.jwt;
    fetch(
      Endpoints["dashboardGetDownloadModelURL"],
      {
        method: 'POST',
        headers: {
          'Content-Type':'application/json',
          'Accept': 'application/json',
          'Authorization': 'Bearer ' + jwtString,
        },
        body: JSON.stringify({
          "RepoId": repo_id,
          "SessionId": session_id,
          "Round": round,
        })
      }
    ).then(response => {
      response.json().then(res => {
        let url = res['DownloadUrl'];
        this._openInNewTab(url);
      })
    });
  }
37 |
38 | _openInNewTab(url) {
39 | var win = window.open(url);
40 | win.focus();
41 | }
42 |
// Renders the "Model Hub" panel: a table (SessionId / Round / Time / Download
// Model) with a download link per log entry, or a placeholder when empty.
// NOTE(review): the JSX markup was stripped by the extraction that produced
// this dump, and inner lines 47-50 are missing entirely — reconstruct from VCS.
43 | render() {
44 | let content;
45 | if (this.props.logs.length === 0) {
46 | content = (
47 |
51 | );
52 | } else {
53 | content = (
54 |
55 |
56 |
57 |
58 | SessionId |
59 | Round |
60 | Time |
61 | Download Model |
62 |
63 |
64 |
65 |
// One row per log entry; the download cell calls _downloadModel(log).
66 | {this.props.logs.map((log, index) => {
67 | return
68 | {log.SessionId} |
69 | {JSON.parse(log.Content).round} |
70 | {this._formatTime(log.Timestamp)} |
71 |
72 | Download
73 | |
74 |
75 | })}
76 |
77 |
78 | );
79 | }
80 |
81 | return (
82 |
83 |
84 |
85 |
86 |
87 |
Model Hub
88 |
Download your resulting models from here.
89 |
90 |
91 | {content}
92 |
93 |
94 |
95 |
96 | )
97 | }
98 |
99 | _formatTime(timestamp) {
100 | var t = new Date(timestamp * 1000);
101 | return t.toISOString();
102 | }
103 | }
104 |
105 | export default RepoModels;
106 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/repo/repoStatus.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import Reflux from 'reflux';
3 |
4 | import CoordinatorStore from './../../stores/CoordinatorStore';
5 | import CoordinatorActions from './../../actions/CoordinatorActions';
6 |
7 |
// Status badge for a single repo. Subscribes to CoordinatorStore and, on
// mount, kicks off a status fetch for this.props.repoId. Display priority:
// deploying > still-loading ("...") > malformed status ("Unknown") > Busy flag
// ("Active"/"Idle").
// NOTE(review): the JSX wrapping each returned label was stripped by the
// extraction that produced this dump.
8 | class RepoStatus extends Reflux.Component {
9 |
10 | constructor(props) {
11 | super(props);
// Reflux wiring: mirrors CoordinatorStore.state into this.state.
12 | this.store = CoordinatorStore;
13 | }
14 |
15 | componentDidMount() {
16 | CoordinatorActions.fetchCoordinatorStatus(this.props.repoId);
17 | }
18 |
19 | render() {
20 | const status = this.state.coordinatorStatuses[this.props.repoId];
21 |
// Parent signals a deploy in progress; overrides any fetched status.
22 | if (this.props.isDeploying) {
23 | return Deploying...
24 | }
25 |
// No status fetched yet for this repo.
26 | if (status === undefined) {
27 | return ...
28 | }
29 |
// Fetched object lacks the expected "Busy" key.
30 | if (!("Busy" in status)) {
31 | return Unknown
32 | }
33 |
34 | if (status["Busy"] === true) {
35 | return Active
36 | } else {
37 | return Idle;
38 | }
39 | }
40 | }
41 |
42 | export default RepoStatus;
43 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/signin.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import LoginForm from './signin/loginForm';
3 |
// Sign-in page: a thin wrapper that renders the LoginForm component.
// NOTE(review): the JSX (presumably <LoginForm /> inside a container) was
// stripped by the extraction that produced this dump.
4 | class SignIn extends Component {
5 | render() {
6 | return (
7 |
8 |
9 |
10 | );
11 | }
12 | }
13 |
14 | export default SignIn;
15 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/signin/common.css:
--------------------------------------------------------------------------------
/* Inline link used inside the sign-in/sign-up forms. The :hover selector is
   included so the link keeps the same appearance on mouse-over. */
1 | .form-link, .form-link:hover {
2 | margin-left: 7.5px;
3 | color: white;
4 | font-weight: 700;
5 | text-decoration: none;
6 | }
7 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/signin/loginForm.js:
--------------------------------------------------------------------------------
1 | // Generated by https://pagedraw.io/pages/7992
2 | import React from 'react';
3 | import ReactDOM from 'react-dom';
4 | import { Link, withRouter } from "react-router-dom";
5 | import Reflux from 'reflux';
6 |
7 | import AuthStore from './../../stores/AuthStore';
8 | import AuthActions from './../../actions/AuthActions';
9 |
10 | import './common.css';
11 |
// Email/password login form bound to AuthStore. On successful auth it
// redirects to the dashboard; on failure it shows AuthStore's error string.
// NOTE(review): the form JSX (original lines 46-70) is missing from this dump.
12 | class LoginForm extends Reflux.Component {
13 |
14 | constructor(props) {
15 | super(props);
// Reflux wiring: mirrors AuthStore.state into this.state.
16 | this.store = AuthStore;
17 | }
18 |
// Redirect once AuthStore flips isAuthenticated.
// NOTE(review): componentWillUpdate is deprecated (React 16.9+) — consider
// componentDidUpdate when upgrading.
19 | componentWillUpdate(nextProps, nextState) {
20 | var isAuthenticated = nextState['isAuthenticated'];
21 | if (isAuthenticated) {
22 | this.props.history.push("dashboard");
23 | }
24 | }
25 |
// Submit handler: reads the email/password inputs via legacy string refs +
// findDOMNode and fires the login action. Consider callback refs on upgrade.
26 | _handleSubmit(event) {
27 | event.preventDefault();
28 |
29 | AuthActions.login(
30 | ReactDOM.findDOMNode(this.refs.email).value,
31 | ReactDOM.findDOMNode(this.refs.password).value
32 | );
33 | }
34 |
35 | render() {
36 | var errorMessage = "";
37 | if (this.state.error) {
// Error banner; markup stripped by extraction.
38 | errorMessage = (
39 |
40 | { this.state.error }
41 |
42 | );
43 | }
44 |
45 | return (
46 |
71 | );
72 | }
73 | }
74 |
75 | export default withRouter(LoginForm);
76 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/signin/registrationForm.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import { Link, withRouter } from "react-router-dom";
4 | import Reflux from 'reflux';
5 |
6 | import AuthStore from './../../stores/AuthStore';
7 | import AuthActions from './../../actions/AuthActions';
8 |
9 | import './common.css';
10 |
// Registration form bound to AuthStore. Collects profile fields via legacy
// string refs and fires AuthActions.registration; redirects to the dashboard
// once authenticated.
// NOTE(review): class name "RegistationForm" is a typo for "RegistrationForm"
// (the export on the last line of this file uses the same typo, so it is
// internally consistent). The form JSX (original lines 54-101) is missing
// from this dump, and componentWillUpdate is deprecated in React 16.9+.
11 | class RegistationForm extends Reflux.Component {
12 |
13 | constructor(props) {
14 | super(props);
// Reflux wiring: mirrors AuthStore.state into this.state.
15 | this.store = AuthStore;
16 | }
17 |
18 | componentWillUpdate(nextProps, nextState) {
19 | var isAuthenticated = nextState['isAuthenticated'];
20 | if (isAuthenticated) {
21 | this.props.history.push("dashboard");
22 | }
23 | }
24 |
// Gather all input values and submit them as one registration payload.
25 | _handleSubmit(event) {
26 | event.preventDefault();
27 |
28 | var registrationObject = {
29 | "first_name": ReactDOM.findDOMNode(this.refs.fname).value,
30 | "last_name": ReactDOM.findDOMNode(this.refs.lname).value,
31 | "company": ReactDOM.findDOMNode(this.refs.organization).value,
32 | "occupation": ReactDOM.findDOMNode(this.refs.position).value,
33 | "email": ReactDOM.findDOMNode(this.refs.email).value,
34 | "password1": ReactDOM.findDOMNode(this.refs.password1).value,
35 | "password2": ReactDOM.findDOMNode(this.refs.password2).value
36 | };
37 |
38 | AuthActions.registration(registrationObject);
39 | }
40 |
41 | render() {
42 | var errorMessage = "";
43 | if (this.state.error) {
// Dismissible error banner; markup stripped by extraction.
44 | errorMessage = (
45 |
46 |
×
47 | { this.state.error }
48 |
49 | );
50 | }
51 |
52 |
53 | return (
54 |
102 |
103 | );
104 | }
105 | }
106 |
107 | export default withRouter(RegistationForm);
108 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/signout.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import { Redirect } from 'react-router-dom';
3 | import AuthActions from './../actions/AuthActions';
4 |
5 |
// Sign-out page: fires the logout action on mount (clearing AuthStore state)
// and shows a transient "Signing out..." message.
// NOTE(review): the JSX wrapper was stripped by the extraction that produced
// this dump.
6 | class SignOut extends Component {
7 |
8 | componentDidMount() {
9 | AuthActions.logout();
10 | }
11 |
12 | render() {
13 | return (
14 |
15 |
16 | Signing out...
17 |
18 | );
19 | }
20 | }
21 |
22 | export default SignOut;
23 |
--------------------------------------------------------------------------------
/dashboard-ui/src/components/signup.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import RegistrationForm from './signin/registrationForm';
3 |
// Sign-up page: a thin wrapper that renders the RegistrationForm component
// (imported at the top of this file).
// NOTE(review): the class is named SignIn although this is signup.js —
// presumably copy-pasted from signin.js; consider renaming to SignUp. The
// JSX body was stripped by the extraction that produced this dump.
4 | class SignIn extends Component {
5 | render() {
6 | return (
7 |
8 |
9 |
10 | );
11 | }
12 | }
13 |
14 | export default SignIn;
15 |
--------------------------------------------------------------------------------
/dashboard-ui/src/constants/actionTypes.js:
--------------------------------------------------------------------------------
1 | import keyMirror from 'keymirror';
2 |
// Action-type name table. keyMirror turns {INITIALIZE: null} into
// {INITIALIZE: 'INITIALIZE'}, so every action type equals its own key.
// `const` instead of `var`: the binding is never reassigned.
const actionTypes = keyMirror({
  INITIALIZE: null,
});
6 |
7 | export default actionTypes;
8 |
--------------------------------------------------------------------------------
/dashboard-ui/src/constants/endpoints.js:
--------------------------------------------------------------------------------
/**
 * REST endpoint URLs used throughout the dashboard.
 * Declared `const` and frozen so no caller can reassign or mutate the shared
 * table at runtime (all values are plain strings, so a shallow freeze is
 * sufficient).
 */
const Endpoints = Object.freeze({
  'eauthLogin': 'https://eauth.dataagora.com/auth/login/',
  'eauthUser': 'https://eauth.dataagora.com/auth/user/',
  'eauthRegistration': 'https://eauth.dataagora.com/auth/registration/',
  'eauthLogout': 'https://eauth.dataagora.com/auth/logout/', // Not used with JWT.

  'dashboardFetchReposRemaining': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/userdata',
  'dashboardCreateNewRepo': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/repo',
  'dashboardFetchAllRepos': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/repos',
  'dashboardFetchRepoData': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/repo/',
  'dashboardFetchRepoLogs': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/logs/',
  'dashboardDeleteRepo': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/delete/',
  'dashboardFetchCoordinatorStatus': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/coordinator/status/',
  'dashboardGetDownloadModelURL': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/model',
  'dashboardGetExploraURL': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/get_username/',
  'dashboardResetCloudNode': 'https://fwkebecb19.execute-api.us-west-1.amazonaws.com/dev/reset_state/'
});
18 |
19 | export default Endpoints;
20 |
--------------------------------------------------------------------------------
/dashboard-ui/src/dispatcher/appDispatcher.js:
--------------------------------------------------------------------------------
1 | /**
2 | *
3 | * AppDispatcher
4 | *
5 | * A singleton that operates as the central hub for application updates.
6 | *
7 | */
8 |
// Flux dispatcher class pulled from the 'flux' package; `const` instead of
// `var` since the binding never changes.
const Dispatcher = require('flux').Dispatcher;

// Export one shared instance: stores register with, and actions dispatch
// through, this single hub.
module.exports = new Dispatcher();
12 |
--------------------------------------------------------------------------------
/dashboard-ui/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from 'react-dom'
3 | import { BrowserRouter } from 'react-router-dom';
4 | import App from './components/app';
5 | import InitializeActions from './actions/initializeActions';
6 | import registerServiceWorker from './utils/registerServiceWorker';
7 |
8 | import 'jquery/dist/jquery.js';
9 | import 'bootstrap/dist/css/bootstrap.css';
10 | import 'bootstrap/dist/js/bootstrap.js';
11 |
12 |
13 | import { library } from '@fortawesome/fontawesome-svg-core'
14 | import { faSignOutAlt, faPlus, faSync } from '@fortawesome/free-solid-svg-icons'
15 |
// App bootstrap: register the FontAwesome icons used in the UI, run the
// one-time init action, mount the React tree into #app, and enable the
// production service worker.
16 | library.add(faPlus, faSignOutAlt, faSync);
17 |
18 | InitializeActions.initApp();
// NOTE(review): the JSX passed to render() — per the imports, <App /> inside
// <BrowserRouter> — was stripped by the extraction that produced this dump.
19 | render((
20 |
21 |
22 |
23 | ), document.getElementById('app'));
24 | registerServiceWorker();
25 |
--------------------------------------------------------------------------------
/dashboard-ui/src/routes.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Switch, Route } from 'react-router-dom';
3 |
4 | import PrivateRoute from './utils/PrivateRoute';
5 | import AuthRoute from './utils/AuthRoute';
6 |
7 | import Home from './components/homePage';
8 | import SignIn from './components/signin';
9 | import SignUp from './components/signup';
10 | import SignOut from './components/signout';
11 | import Dashboard from './components/dashboard';
12 | import Repo from './components/repo'
13 | import NewRepo from './components/newRepo'
14 | import NotFoundPage from './components/notFoundPage';
15 |
16 |
// Route table for the app. Per the imports above, it wires Home, SignIn,
// SignUp, SignOut, Dashboard, Repo, NewRepo and NotFoundPage into a
// react-router <Switch>, using PrivateRoute (auth required) and AuthRoute
// (unauthenticated only) wrappers.
// NOTE(review): the JSX route elements were stripped by the extraction that
// produced this dump — only the arrow-function shell remains.
17 | var Routes = () => (
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 | );
29 |
30 | export default Routes;
31 |
--------------------------------------------------------------------------------
/dashboard-ui/src/stores/AuthStore.js:
--------------------------------------------------------------------------------
1 | import Reflux from 'reflux';
2 | import cookie from 'react-cookies';
3 | import AuthActions from './../actions/AuthActions';
4 | import Endpoints from './../constants/endpoints.js';
5 |
6 |
7 | // TODO: Implement session keep-alive logic.
/**
 * Reflux store holding authentication state: the JWT, its decoded claims and
 * loading/error flags. The token is persisted in localStorage so a page
 * reload keeps the user signed in.
 *
 * Fix vs. original: `_getClaims` passed the JWT payload straight to `atob`,
 * but JWT segments are base64url-encoded (RFC 7515) — any '-' or '_'
 * character made `atob` throw. The payload is now translated to standard
 * base64 before decoding.
 */
class AuthStore extends Reflux.Store {
  constructor () {
    super();
    this.init();
    this.listenables = AuthActions;
  }

  // Seed state from a cached token when one exists; otherwise start logged out.
  init () {
    if (!this._isAuthenticated()) {
      this._resetState();
    } else {
      // Pull cached token if one exists...
      this.state = {
        error: false,
        loading: false,
        jwt: this._getJWT(),
        claims: this._getClaims(),
        isAuthenticated: true
      };
    }
  }

  /** POST credentials to the eauth login endpoint. */
  onLogin (email, password) {
    this._resetState();
    this.state.loading = true;
    this._changed();

    const endpoint = Endpoints["eauthLogin"];
    fetch(
      endpoint, {
        method: 'POST',
        headers: {
          'Content-Type':'application/json',
          'Accept': 'application/json',
        },
        body: JSON.stringify({"email": email, "password": password}),
      }
    ).then(response => {
      this._handleLoginRegistrationResponse(response, AuthActions.login);
    });
  }

  // Persist the token, decode its claims, and mark the session authenticated.
  onLoginCompleted (jwt) {
    this.state.jwt = jwt;
    localStorage.setItem('jwt', jwt);
    this.state.claims = this._getClaims();
    this.state.error = false;
    this.state.isAuthenticated = true;
    this.state.loading = false;
    this._deleteCookies();
    this._changed();
  }

  onLoginFailed (errorMessage) {
    this._resetState();
    this.state.error = errorMessage;
    this._changed();
  }

  /** POST the registration payload to the eauth registration endpoint. */
  onRegistration (registrationObject) {
    this._resetState();
    this.state.loading = true;
    this._changed();

    fetch(
      Endpoints["eauthRegistration"], {
        method: 'POST',
        headers: {
          'Content-Type':'application/json',
          'Accept': 'application/json',
        },
        body: JSON.stringify(registrationObject),
      }
    ).then(response => {
      this._handleLoginRegistrationResponse(response, AuthActions.registration);
    });
  }

  // Same post-auth bookkeeping as onLoginCompleted.
  onRegistrationCompleted (jwt) {
    this.state.jwt = jwt;
    localStorage.setItem('jwt', jwt);
    this.state.claims = this._getClaims();
    this.state.error = false;
    this.state.isAuthenticated = true;
    this.state.loading = false;
    this._deleteCookies();
    this._changed();
  }

  onRegistrationFailed(errorMessage) {
    this._resetState();
    this.state.error = errorMessage;
    this._changed();
  }

  onLogout () {
    // Clear it all!
    this._resetState();
    this._changed();
  }

  // Both login and registration return {"token": ...} on success; anything
  // else is routed to the action's failed() child.
  _handleLoginRegistrationResponse(response, refluxAction) {
    response.json().then(serverResponse => {
      if (serverResponse && "token" in serverResponse) {
        const jwt = serverResponse['token'];
        refluxAction.completed(jwt);
      } else {
        // TODO: Use error returned by server.
        refluxAction.failed(JSON.stringify(serverResponse));
      }
    });
  }

  // Truthy when a cached token exists. (Does not verify expiry server-side.)
  _isAuthenticated () {
    return this._getJWT();
  }

  /**
   * Decode the JWT payload (the second dot-separated segment).
   * JWT segments are base64url-encoded, so map '-'/'_' back to '+'/'/' before
   * handing the string to atob().
   */
  _getClaims() {
    const jwt = this._getJWT();
    if (jwt === null) {
      return null;
    }
    const payload = jwt.split('.')[1].replace(/-/g, '+').replace(/_/g, '/');
    return JSON.parse(atob(payload));
  }

  // Cached token from localStorage, or null when absent/empty.
  _getJWT() {
    const jwt = localStorage.getItem("jwt");
    if (!jwt) {
      return null;
    }
    return jwt;
  }

  _changed () {
    this.trigger(this.state);
  }

  // Logged-out baseline: also drops the cached token and session cookies.
  _resetState () {
    this.state = {
      error: false,
      loading: false,
      jwt: null,
      isAuthenticated: false,
      claims: {},
    };

    localStorage.removeItem('jwt');

    this._deleteCookies();
  }

  // Remove Django-style session cookies left over from the eauth service.
  _deleteCookies() {
    cookie.remove('csrftoken', { path: '/' });
    cookie.remove('sessionid', { path: '/' });
  }

}
165 |
166 | export default AuthStore;
167 |
--------------------------------------------------------------------------------
/dashboard-ui/src/stores/CoordinatorStore.js:
--------------------------------------------------------------------------------
1 | import Reflux from 'reflux';
2 | import CoordinatorActions from './../actions/CoordinatorActions';
3 | import AuthStore from './AuthStore';
4 | import Endpoints from './../constants/endpoints.js';
5 |
6 |
/**
 * Reflux store tracking the busy/idle status of each repo's coordinator.
 * Statuses live in `state.coordinatorStatuses`, keyed by repo id.
 */
class CoordinatorStore extends Reflux.Store {

  constructor () {
    super();
    this.init();
    this.listenables = CoordinatorActions;
  }

  init () {
    this.state = {
      loading: true, // not used
      error: false, // not used
      coordinatorStatuses: {},
    };
  }

  // GET the coordinator status for one repo; no-op unless signed in.
  onFetchCoordinatorStatus(repoId) {
    if (!AuthStore.state.isAuthenticated) {
      return;
    }
    const token = AuthStore.state.jwt;

    this.state.loading = true;
    this._changed();

    const requestOptions = {
      method: 'GET',
      headers: {
        'Content-Type':'application/json',
        'Accept': 'application/json',
        'Authorization': 'Bearer ' + token,
      },
    };
    fetch(Endpoints["dashboardFetchCoordinatorStatus"] + repoId, requestOptions)
      .then(response => {
        this._handleResponse(repoId, response, CoordinatorActions.fetchCoordinatorStatus);
      });
  }

  // Route the HTTP response to the action's completed/failed child action.
  _handleResponse(repoId, response, refluxAction) {
    response.json().then(serverResponse => {
      if (response.status === 200) {
        refluxAction.completed(repoId, serverResponse);
      } else {
        // TODO: Use error returned by server.
        refluxAction.failed(repoId, serverResponse);
      }
    });
  }

  onFetchCoordinatorStatusCompleted (repoId, status) {
    this.state.coordinatorStatuses[repoId] = status;
    this.state.loading = false;
    this._changed();
  }

  onFetchCoordinatorStatusFailed (repoId, errorObject) {
    this.state.coordinatorStatuses[repoId] = {};
    this.state.error = errorObject["message"];
    this.state.loading = false;
    this._changed();
  }

  _changed () {
    this.trigger(this.state);
  }

}
74 |
75 | export default CoordinatorStore;
76 |
--------------------------------------------------------------------------------
/dashboard-ui/src/stores/DashboardStore.js:
--------------------------------------------------------------------------------
1 | import Reflux from 'reflux';
2 | import DashboardActions from './../actions/DashboardActions';
3 | import AuthStore from './AuthStore';
4 | import Endpoints from './../constants/endpoints.js';
5 |
6 |
/**
 * Reflux store backing the dashboard's repo list.
 *
 * Fix vs. original: the failure handler set `repos = {}` although `init()`
 * and the sort below treat `repos` as an array — it now resets to `[]`.
 */
class DashboardStore extends Reflux.Store {

  constructor () {
    super();
    this.init();
    this.listenables = DashboardActions;
  }

  init () {
    this.state = {
      repos: [],
      loading: true,
      error: false,
    };
  }

  /** GET every repo for the signed-in user; no-op when not authenticated. */
  onFetchAllRepos() {
    if (AuthStore.state.isAuthenticated) {
      const jwtString = AuthStore.state.jwt;

      this.state.loading = true;
      this._changed();

      fetch(
        Endpoints["dashboardFetchAllRepos"], {
          method: 'GET',
          headers: {
            'Content-Type':'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + jwtString,
          },
        }
      ).then(response => {
        this._handleFetchAllReposResponse(response);
      });
    }
  }

  // Route the HTTP response to the action's completed/failed child action.
  _handleFetchAllReposResponse(response) {
    response.json().then(serverResponse => {
      if (response.status === 200) {
        DashboardActions.fetchAllRepos.completed(serverResponse);
      } else {
        // TODO: Use error returned by server.
        DashboardActions.fetchAllRepos.failed(serverResponse);
      }
    });
  }

  // Keep the list sorted by repo name so the dashboard renders a stable order.
  onFetchAllReposCompleted (repoList) {
    this.state.repos = repoList;
    this.state.repos.sort((a, b) => {
      if (a.Name < b.Name) return -1;
      if (a.Name > b.Name) return 1;
      return 0;
    });
    this.state.loading = false;
    this._changed();
  }

  onFetchAllReposFailed (errorObject) {
    // Reset to an empty ARRAY (was `{}`), matching init() and array consumers.
    this.state.repos = [];
    this.state.error = errorObject["message"];
    this.state.loading = false;
    this._changed();
  }

  _changed () {
    this.trigger(this.state);
  }

}
79 |
80 | export default DashboardStore;
81 |
--------------------------------------------------------------------------------
/dashboard-ui/src/stores/RepoDataStore.js:
--------------------------------------------------------------------------------
1 | import Reflux from 'reflux';
2 | import RepoDataActions from './../actions/RepoDataActions';
3 | import AuthStore from './AuthStore';
4 | import Endpoints from './../constants/endpoints.js';
5 |
6 |
/**
 * Reflux store for a single repo's data plus the "create new repo" workflow
 * (quota check, creation request, resulting repo id / API key).
 *
 * Fix vs. original: stray debug `console.log` statements removed from
 * onShowModal and onCreateNewRepoCompleted.
 */
class RepoDataStore extends Reflux.Store {

  constructor () {
    super();
    this.init();
    this.listenables = RepoDataActions;
  }

  init () {
    this.state = {
      loading: true,
      error: false,

      repoWasFound: false,
      repoData: {},
      repoStatus: {},
      repoLogs: [],

      // Transient state for the repo-creation flow.
      creationState: {
        reposRemaining: false,
        repoName: null,
        repoId: null,
        apiKey: null,
        loading: true,
        creating: false,
        created: false,
        error: false,
      },
    };
  }

  // Re-open the "repo created" modal.
  onShowModal() {
    this.state.creationState.created = true;
    this._changed();
  }

  /** GET the data for one repo; no-op when not authenticated. */
  onFetchRepoData(repoId) {
    if (AuthStore.state.isAuthenticated) {
      const jwtString = AuthStore.state.jwt;

      this.state.loading = true;
      this._changed();

      fetch(
        Endpoints["dashboardFetchRepoData"] + repoId, {
          method: 'GET',
          headers: {
            'Content-Type':'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + jwtString,
          },
        }
      ).then(response => {
        this._handleResponse(response, RepoDataActions.fetchRepoData);
      });
    }
  }

  onFetchRepoDataCompleted (repoData) {
    this.state.repoWasFound = true;
    this.state.repoData = repoData;
    this.state.loading = false;
    this._changed();
  }

  onFetchRepoDataFailed (errorObject) {
    this.state.repoWasFound = false;
    this.state.repoData = {};
    this.state.error = errorObject["Message"];
    this.state.loading = false;
    this._changed();
  }


  /** POST a creation request for a new repo with the given name/description. */
  onCreateNewRepo(repoName, repoDescription) {
    if (AuthStore.state.isAuthenticated) {
      const jwtString = AuthStore.state.jwt;

      this.state.creationState.creating = true;
      this._changed();

      fetch(
        Endpoints["dashboardCreateNewRepo"], {
          method: 'POST',
          headers: {
            'Content-Type':'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + jwtString,
          },
          body: JSON.stringify({
            "RepoName": repoName,
            "RepoDescription": repoDescription,
          })
        }
      ).then(response => {
        this._handleResponse(response, RepoDataActions.createNewRepo);
      });
    }
  }

  // Server returns the new repo's id and its one-time-visible API key.
  onCreateNewRepoCompleted(results) {
    this.state.creationState.repoId = results["Results"]["RepoId"];
    this.state.creationState.apiKey = results["Results"]["TrueApiKey"];
    this.state.creationState.creating = false;
    this.state.creationState.created = true;
    this._changed();
  }

  onCreateNewRepoFailed(errorObject) {
    this.state.creationState.repoId = null;
    this.state.creationState.apiKey = null;
    this.state.creationState.creating = false;
    this.state.creationState.created = false;
    this.state.creationState.error = errorObject["Message"];
    this._changed();
  }


  /** GET how many more repos the user is allowed to create. */
  onFetchReposRemaining() {
    if (AuthStore.state.isAuthenticated) {
      const jwtString = AuthStore.state.jwt;

      this.state.creationState.loading = true;
      this._changed();

      fetch(
        Endpoints["dashboardFetchReposRemaining"], {
          method: 'GET',
          headers: {
            'Content-Type':'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + jwtString,
          }
        }
      ).then(response => {
        this._handleResponse(response, RepoDataActions.fetchReposRemaining);
      });
    }
  }

  onFetchReposRemainingCompleted(results) {
    this.state.creationState.loading = false;
    this.state.creationState.reposRemaining = results["ReposRemaining"];
    this._changed();
  }

  onFetchReposRemainingFailed(errorObject) {
    this.state.creationState.loading = false;
    this.state.creationState.error = errorObject["Message"];
    this._changed();
  }

  // Drop everything back to the initial state (e.g. when leaving the page).
  onResetState() {
    this.init();
  }


  // Route the HTTP response to the action's completed/failed child action.
  _handleResponse(response, refluxAction) {
    response.json().then(serverResponse => {
      if (response.status === 200) {
        refluxAction.completed(serverResponse);
      } else {
        // TODO: Use error returned by server.
        refluxAction.failed(serverResponse);
      }
    });
  }

  _changed () {
    this.trigger(this.state);
  }

}
182 |
183 | export default RepoDataStore;
184 |
--------------------------------------------------------------------------------
/dashboard-ui/src/stores/RepoLogsStore.js:
--------------------------------------------------------------------------------
1 | import Reflux from 'reflux';
2 | import RepoLogsActions from './../actions/RepoLogsActions';
3 | import AuthStore from './AuthStore';
4 | import Endpoints from './../constants/endpoints.js';
5 |
6 |
/**
 * Reflux store for one repo's training-session logs.
 *
 * Fix vs. original: the failure handler set `repoLogs = {}` although `init()`
 * uses `[]` and consumers iterate it as an array — it now resets to `[]`.
 */
class RepoLogsStore extends Reflux.Store {

  constructor () {
    super();
    this.init();
    this.listenables = RepoLogsActions;
  }

  init () {
    this.state = {
      loadingLogs: true,
      errorLogs: false,
      repoLogs: [],
    };
  }

  /** GET the logs for one repo; no-op when not authenticated. */
  onFetchRepoLogs(repoId) {
    if (AuthStore.state.isAuthenticated) {
      const jwtString = AuthStore.state.jwt;

      this.state.loadingLogs = true;
      this._changed();

      fetch(
        Endpoints["dashboardFetchRepoLogs"] + repoId, {
          method: 'GET',
          headers: {
            'Content-Type':'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + jwtString,
          },
        }
      ).then(response => {
        this._handleResponse(response, RepoLogsActions.fetchRepoLogs);
      });
    }
  }

  // Route the HTTP response to the action's completed/failed child action.
  _handleResponse(response, refluxAction) {
    response.json().then(serverResponse => {
      if (response.status === 200) {
        refluxAction.completed(serverResponse);
      } else {
        // TODO: Use error returned by server.
        refluxAction.failed(serverResponse);
      }
    });
  }

  onFetchRepoLogsCompleted (repoLogs) {
    this.state.repoLogs = repoLogs;
    this.state.loadingLogs = false;
    this._changed();
  }

  onFetchRepoLogsFailed (errorObject) {
    // Reset to an empty ARRAY (was `{}`), matching init() and array consumers.
    this.state.repoLogs = [];
    this.state.errorLogs = errorObject["Message"];
    this.state.loadingLogs = false;
    this._changed();
  }

  _changed () {
    this.trigger(this.state);
  }

}
74 |
75 | export default RepoLogsStore;
76 |
--------------------------------------------------------------------------------
/dashboard-ui/src/utils/AuthRoute.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Route, Redirect } from 'react-router-dom';
3 | import AuthStore from './../stores/AuthStore';
4 |
// Route wrapper for pages that only make sense while signed OUT (sign-in /
// sign-up): renders the wrapped component when the user is NOT authenticated,
// otherwise redirects (per the Redirect import above).
// NOTE(review): the <Route render={...}> / <Redirect to=.../> JSX was stripped
// by the extraction that produced this dump.
5 | const AuthRoute = ({ component: Component, ...rest }) => (
6 |
9 | !AuthStore.state.isAuthenticated ? (
10 |
11 | ) : (
12 |
18 | )
19 | }
20 | />
21 | );
22 |
23 | export default AuthRoute;
24 |
--------------------------------------------------------------------------------
/dashboard-ui/src/utils/PrivateRoute.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Route, Redirect } from 'react-router-dom';
3 | import AuthStore from './../stores/AuthStore';
4 |
// Route wrapper for pages that REQUIRE authentication: renders the wrapped
// component when the user is signed in, otherwise redirects (per the Redirect
// import above).
// NOTE(review): the <Route render={...}> / <Redirect to=.../> JSX was stripped
// by the extraction that produced this dump.
5 | const PrivateRoute = ({ component: Component, ...rest }) => (
6 |
9 | AuthStore.state.isAuthenticated ? (
10 |
11 | ) : (
12 |
18 | )
19 | }
20 | />
21 | );
22 |
23 | export default PrivateRoute;
24 |
--------------------------------------------------------------------------------
/dashboard-ui/src/utils/registerServiceWorker.js:
--------------------------------------------------------------------------------
1 | // In production, we register a service worker to serve assets from local cache.
2 |
3 | // This lets the app load faster on subsequent visits in production, and gives
4 | // it offline capabilities. However, it also means that developers (and users)
5 | // will only see deployed updates on the "N+1" visit to a page, since previously
6 | // cached resources are updated in the background.
7 |
8 | // To learn more about the benefits of this model, read https://goo.gl/KwvDNy.
9 | // This link also includes instructions on opting out of this behavior.
10 |
// True when the page is served from a loopback address (IPv4, IPv6 or the
// 'localhost' hostname).
const isLocalhost = Boolean(
  window.location.hostname === 'localhost' ||
    // [::1] is the IPv6 localhost address.
    window.location.hostname === '[::1]' ||
    // 127.0.0.1/8 is considered localhost for IPv4.
    /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/.test(
      window.location.hostname
    )
);
20 |
// Entry point: registers the service worker in production builds only, after
// the page has fully loaded, and only when PUBLIC_URL shares the page's
// origin. On localhost it first validates that the served worker script is
// genuine (checkValidServiceWorker) instead of registering blindly.
21 | export default function register() {
22 | if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
23 | // The URL constructor is available in all browsers that support SW.
24 | const publicUrl = new URL(process.env.PUBLIC_URL, window.location);
25 | if (publicUrl.origin !== window.location.origin) {
26 | // Our service worker won't work if PUBLIC_URL is on a different origin
27 | // from what our page is served on. This might happen if a CDN is used to
28 | // serve assets; see https://github.com/facebookincubator/create-react-app/issues/2374
29 | return;
30 | }
31 |
// Defer registration until 'load' so it doesn't compete with first paint.
32 | window.addEventListener('load', () => {
33 | const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;
34 |
35 | if (isLocalhost) {
36 | // This is running on localhost. Lets check if a service worker still exists or not.
37 | checkValidServiceWorker(swUrl);
38 |
39 | // Add some additional logging to localhost, pointing developers to the
40 | // service worker/PWA documentation.
41 | navigator.serviceWorker.ready.then(() => {
42 | console.log(
43 | 'This web app is being served cache-first by a service ' +
44 | 'worker. To learn more, visit https://goo.gl/SC7cgQ'
45 | );
46 | });
47 | } else {
48 | // Is not local host. Just register service worker
49 | registerValidSW(swUrl);
50 | }
51 | });
52 | }
53 | }
54 |
// Register the service worker at `swUrl` and hook its update lifecycle so a
// console message distinguishes "new content available" from "content cached
// for offline use".
55 | function registerValidSW(swUrl) {
56 | navigator.serviceWorker
57 | .register(swUrl)
58 | .then(registration => {
// Fired when a new worker version starts installing.
59 | registration.onupdatefound = () => {
60 | const installingWorker = registration.installing;
61 | installingWorker.onstatechange = () => {
62 | if (installingWorker.state === 'installed') {
// An existing controller means an old worker was already serving this page.
63 | if (navigator.serviceWorker.controller) {
64 | // At this point, the old content will have been purged and
65 | // the fresh content will have been added to the cache.
66 | // It's the perfect time to display a "New content is
67 | // available; please refresh." message in your web app.
68 | console.log('New content is available; please refresh.');
69 | } else {
70 | // At this point, everything has been precached.
71 | // It's the perfect time to display a
72 | // "Content is cached for offline use." message.
73 | console.log('Content is cached for offline use.');
74 | }
75 | }
76 | };
77 | };
78 | })
79 | .catch(error => {
80 | console.error('Error during service worker registration:', error);
81 | });
82 | }
83 |
/**
 * Probe `swUrl` to verify a genuine service-worker script is being served.
 * If the URL 404s or doesn't serve JavaScript (likely a different app on this
 * port), unregister the stale worker and reload; otherwise register normally.
 *
 * Fix vs. original: `response.headers.get('content-type')` can be null, and
 * calling `.indexOf` on it threw a TypeError that fell into the `.catch`
 * below and was misreported as "no internet connection". Guard for null
 * before inspecting the header.
 */
function checkValidServiceWorker(swUrl) {
  // Check if the service worker can be found. If it can't reload the page.
  fetch(swUrl)
    .then(response => {
      // Ensure service worker exists, and that we really are getting a JS file.
      const contentType = response.headers.get('content-type');
      if (
        response.status === 404 ||
        contentType === null ||
        contentType.indexOf('javascript') === -1
      ) {
        // No service worker found. Probably a different app. Reload the page.
        navigator.serviceWorker.ready.then(registration => {
          registration.unregister().then(() => {
            window.location.reload();
          });
        });
      } else {
        // Service worker found. Proceed as normal.
        registerValidSW(swUrl);
      }
    })
    .catch(() => {
      console.log(
        'No internet connection found. App is running in offline mode.'
      );
    });
}
110 |
// Remove any service worker previously registered by this app; silently does
// nothing in browsers without service-worker support.
export function unregister() {
  if (!('serviceWorker' in navigator)) {
    return;
  }
  navigator.serviceWorker.ready.then(registration => {
    registration.unregister();
  });
}
118 |
--------------------------------------------------------------------------------