├── .DS_Store
├── .ipynb_checkpoints
├── CalculateC-checkpoint.ipynb
└── Naive data-checkpoint.ipynb
├── CalculateC.ipynb
├── Clustering Data
├── Aggregation.txt
├── Compound.txt
├── D31.txt
├── R15.txt
├── flame.txt
├── jain.txt
├── pathbased.txt
└── spiral.txt
├── Naive data.ipynb
├── README.md
├── SSC_1.0
├── BuildAdjacency.m
├── DataProjection.m
├── Misclassification.m
├── OutlierDetection.m
├── Readme.tex
├── SSC.m
├── SparseCoefRecovery.m
├── SpectralClustering.m
└── missclassGroups.m
├── dsc.py
├── dsc.pyc
├── l1benchmark
├── .DS_Store
├── L1Solvers
│ ├── .DS_Store
│ ├── SolveAMP.m
│ ├── SolveDALM.m
│ ├── SolveDALM_CBM.m
│ ├── SolveFISTA.m
│ ├── SolveFISTA_CBM.m
│ ├── SolveHomotopy.m
│ ├── SolveHomotopy_CBM.m
│ ├── SolveHomotopy_CBM_std.m
│ ├── SolveL1LS.m
│ ├── SolveL1LS_CBM.m
│ ├── SolveOMP.m
│ ├── SolveOMP_CBM.m
│ ├── SolvePALM.m
│ ├── SolvePALM_CBM.m
│ ├── SolvePDIPA.m
│ ├── SolvePDIPA_CBM.m
│ ├── SolvePDIPA_CBM_std.m
│ ├── SolveSesopPCD.m
│ ├── SolveSesopPCD_CBM.m
│ ├── SolveSpaRSA.m
│ ├── SolveSpaRSA_CBM.m
│ ├── SolveTFOCS.m
│ ├── SolveTFOCS_CBM.m
│ ├── multMatrAdj_zhou.m
│ ├── multMatr_zhou.m
│ └── sesoptn_t.m
└── compare_noise_free.m
├── parser.out
├── parsetab.py
├── smop
├── SMOP.rst
├── __init__.py
├── __init__.pyc
├── backend.py
├── backend.pyc
├── benchmark5
│ ├── Currency1.txt
│ ├── Currency2.txt
│ ├── benchmark2.m
│ └── database.mdb
├── callgraph.py
├── core.py
├── core.pyc
├── fastsolver.m
├── foo.pyx
├── go.m
├── go.py
├── graphviz.py
├── graphviz.pyc
├── homo.py
├── homo.pyc
├── lex.py
├── lex.pyc
├── lexer.py
├── lexer.pyc
├── lm.py
├── main.py
├── main.pyc
├── node.py
├── node.pyc
├── options.py
├── options.pyc
├── parse.py
├── parse.pyc
├── parser.out
├── parsetab.py
├── parsetab.pyc
├── r8_random.m
├── rank.py
├── recipes.py
├── recipes.pyc
├── resolve.py
├── resolve.pyc
├── rewrite.py
├── runtime.pyx
├── solver.m
├── solver.py
├── solver.pyc
├── solver.pyx
├── sparsearray.py
├── test_core.py
├── test_lexer.py
├── test_matlabarray.py
├── test_parse.py
├── test_sparsearray.py
├── typeof.py
├── yacc.py
└── yacc.pyc
├── solveHomotopy.py
├── solveHomotopy.pyc
├── sp.py
├── sp.pyc
├── sp_blitzl1.py
├── sp_blitzl1.pyc
├── sp_cvx.py
├── sp_cvx.pyc
└── supporting_files
├── SolveHomotopy.m
├── __init__.py
├── __init__.pyc
├── helpers.py
├── helpers.pyc
├── nncomponents.py
├── nncomponents.pyc
├── sda.py
└── sda.pyc
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/.DS_Store
--------------------------------------------------------------------------------
/CalculateC.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from sp import getSparcityPrior\n",
12 | "from dsc import DeepSubspaceClustering\n",
13 | "import scipy.io as sio\n",
14 | "\n",
15 | "# ourdata = sio.loadmat(\"/Users/xupeng.tong/Documents/Data/OriginalData/B_mean_2labels.mat\")\n",
16 | "ourdata = sio.loadmat(\"/Volumes/TONY/Regeneron/Data/OriginalData/B_mean_2labels.mat\")\n",
17 | "\n",
18 | "inputX = ourdata['X']\n",
19 | "# inputX = normalize(inputX, axis=0)\n",
20 | "inputY = ourdata['Y'][0,:]\n",
21 | "columnNames = ourdata['columnNames']\n",
22 | "\n",
23 | "# getSparcityPrior(inputX, lambda1=0.01, lambda2=10)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": false
31 | },
32 | "outputs": [],
33 | "source": [
34 | "import numpy as np\n",
35 | "\n",
36 | "data = np.random.rand(20,10)\n",
37 | "# 20 samples 10 features"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": 4,
43 | "metadata": {
44 | "collapsed": false
45 | },
46 | "outputs": [],
47 | "source": [
48 | "from sp_blitzl1 import sparseCoefRecovery\n",
49 | "\n",
50 | "# C = sparseCoefRecovery(inputX.T, l=0.01)\n",
51 | "\n",
52 | "# np.save(\"C_inputX\", C)"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": 3,
58 | "metadata": {
59 | "collapsed": true
60 | },
61 | "outputs": [],
62 | "source": [
63 | "import numpy as np\n",
64 | "\n",
65 | "np.save(\"C_inputX\", C)"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {
72 | "collapsed": false
73 | },
74 | "outputs": [],
75 | "source": [
76 | "import numpy as np\n",
77 | "from smop.homo import SolveHomotopy\n",
78 | "\n",
79 | "STOPPING_TIME = -2 ;\n",
80 | "STOPPING_GROUND_TRUTH = -1;\n",
81 | "STOPPING_DUALITY_GAP = 1;\n",
82 | "STOPPING_SPARSE_SUPPORT = 2;\n",
83 | "STOPPING_OBJECTIVE_VALUE = 3;\n",
84 | "STOPPING_SUBGRADIENT = 4;\n",
85 | "\n",
86 | "maxTime = 8;\n",
87 | "\n",
88 | "data = np.random.rand(20,10)\n",
89 | "\n",
90 | "A, b = np.matrix(data[:, 1:]), np.matrix(data[:,1]).T\n",
91 | "\n",
92 | "x = np.matrix(np.zeros((data.shape[1]-1, 1)))\n",
93 | "SolveHomotopy(A, b, 'stoppingCriterion', STOPPING_TIME, 'groundTruth', x, 'maxtime', maxTime, 'maxiteration', 1e6)"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {
100 | "collapsed": false
101 | },
102 | "outputs": [],
103 | "source": [
104 | "import blitzl1\n",
105 | "import numpy as np\n",
106 | "\n",
107 | "\n",
108 | "A, b = np.matrix(inputX[:, 1:]), np.matrix(inputX[:,1]).T\n",
109 | "\n",
110 | "prob = blitzl1.LogRegProblem(A, b)\n",
111 | "lammax = prob.compute_lambda_max()\n",
112 | "sol = prob.solve(0.001 * lammax)\n",
113 | "len(sol.x[sol.x!=0])\n",
114 | "\n",
115 | "\n",
116 | "# from sp_cvx import sparseCoefRecovery\n",
117 | "# from solveHomotopy import SolveHomotopy\n",
118 | "# from sklearn.decomposition import sparse_encode\n",
119 | "# from sklearn.linear_model import LassoLars\n",
120 | "# from sklearn.linear_model import Lasso\n",
121 | "# from sklearn.linear_model import Lars\n",
122 | "\n",
123 | "# # C = sparse_encode(data, data)\n",
124 | "\n",
125 | "# lars = Lars()\n",
126 | "\n",
127 | "# # inputX_T = inputX.T\n",
128 | "\n",
129 | "# lars.fit(A, b)\n",
130 | "\n",
131 | "# # if i > 1:\n",
132 | "# # C[:i-1,i] = c_val[:i-1]\n",
133 | "# # if i < n:\n",
134 | "# # C[i+1:n,i] = c_val[i:n]\n",
135 | "# # C[i,i] = 0\n",
136 | "\n",
137 | "# print len(lars.coef_[lars.coef_!=0])"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": null,
143 | "metadata": {
144 | "collapsed": false
145 | },
146 | "outputs": [],
147 | "source": []
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {
153 | "collapsed": false
154 | },
155 | "outputs": [],
156 | "source": []
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {
162 | "collapsed": true
163 | },
164 | "outputs": [],
165 | "source": []
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": null,
170 | "metadata": {
171 | "collapsed": false
172 | },
173 | "outputs": [],
174 | "source": [
175 | "sol.x"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": null,
181 | "metadata": {
182 | "collapsed": false
183 | },
184 | "outputs": [],
185 | "source": [
186 | "np.matrix(b).T"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": null,
192 | "metadata": {
193 | "collapsed": false
194 | },
195 | "outputs": [],
196 | "source": [
197 | "print A.T.shape"
198 | ]
199 | },
200 | {
201 | "cell_type": "code",
202 | "execution_count": null,
203 | "metadata": {
204 | "collapsed": false
205 | },
206 | "outputs": [],
207 | "source": [
208 | "(A.T*b).shape"
209 | ]
210 | },
211 | {
212 | "cell_type": "code",
213 | "execution_count": null,
214 | "metadata": {
215 | "collapsed": false
216 | },
217 | "outputs": [],
218 | "source": [
219 | "data.shape[1]-1"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {
226 | "collapsed": false
227 | },
228 | "outputs": [],
229 | "source": [
230 | "np.delete(data,(1),axis=0).shape"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {
237 | "collapsed": false
238 | },
239 | "outputs": [],
240 | "source": [
241 | "np.delete(data, (1), axis=1).shape"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": null,
247 | "metadata": {
248 | "collapsed": false
249 | },
250 | "outputs": [],
251 | "source": [
252 | "from mlabwrap import mlab"
253 | ]
254 | },
255 | {
256 | "cell_type": "code",
257 | "execution_count": null,
258 | "metadata": {
259 | "collapsed": false
260 | },
261 | "outputs": [],
262 | "source": [
263 | "# C = getSparcityPrior(inputX, lambda1=0.01, lambda2=0.1, epochs=100, print_step=1)"
264 | ]
265 | }
266 | ],
267 | "metadata": {
268 | "kernelspec": {
269 | "display_name": "Python 2",
270 | "language": "python",
271 | "name": "python2"
272 | },
273 | "language_info": {
274 | "codemirror_mode": {
275 | "name": "ipython",
276 | "version": 2
277 | },
278 | "file_extension": ".py",
279 | "mimetype": "text/x-python",
280 | "name": "python",
281 | "nbconvert_exporter": "python",
282 | "pygments_lexer": "ipython2",
283 | "version": "2.7.11"
284 | }
285 | },
286 | "nbformat": 4,
287 | "nbformat_minor": 0
288 | }
289 |
--------------------------------------------------------------------------------
/Clustering Data/Compound.txt:
--------------------------------------------------------------------------------
1 | 26.75 22.15 1
2 | 29.8 22.15 1
3 | 31.55 21.1 1
4 | 27.7 20.85 1
5 | 29.9 19.95 1
6 | 26.8 19.05 1
7 | 28.35 18.25 1
8 | 30.4 17.85 1
9 | 27.25 16.7 1
10 | 29.05 16 1
11 | 27.15 14.85 1
12 | 28.2 13.95 1
13 | 30.35 13.85 1
14 | 27.25 11.95 1
15 | 29.45 12.05 1
16 | 31.55 12.2 1
17 | 33.05 10.65 1
18 | 29.95 9.85 1
19 | 28 9.75 1
20 | 27.15 7.85 1
21 | 29.15 8.1 1
22 | 31.95 8.6 1
23 | 34.7 8.55 1
24 | 34.8 12.25 1
25 | 36.3 15.25 1
26 | 36.6 13.2 1
27 | 38.7 14.25 1
28 | 40.3 15.5 1
29 | 42.25 14.25 1
30 | 40.7 12.8 1
31 | 38.6 12.1 1
32 | 36.1 10.5 1
33 | 38.35 10.4 1
34 | 37.65 8.4 1
35 | 40.15 8.55 1
36 | 40.8 10.65 1
37 | 42.9 11.25 1
38 | 41.95 8.5 1
39 | 42.45 17.45 1
40 | 40.25 18.45 1
41 | 42.55 19.45 1
42 | 40.95 20.65 1
43 | 42.25 22.15 1
44 | 38.85 22.4 1
45 | 38.4 20 1
46 | 35.25 20.2 1
47 | 33.25 21 1
48 | 34.15 22.35 1
49 | 35.55 22.5 1
50 | 36.55 21.4 1
51 | 33.35 19.6 2
52 | 32.85 19.55 2
53 | 32.4 19.15 2
54 | 32.45 18.7 2
55 | 32.8 18.9 2
56 | 33.2 19.2 2
57 | 33.7 19.05 2
58 | 33.4 18.75 2
59 | 33.05 18.5 2
60 | 32.8 18.2 2
61 | 34 18.7 2
62 | 33.85 18.25 2
63 | 33.35 18.15 2
64 | 32.8 17.7 2
65 | 33.15 17.55 2
66 | 33.75 17.75 2
67 | 34.15 17.85 2
68 | 34.35 18.35 2
69 | 34.95 18.5 2
70 | 34.75 18.05 2
71 | 35.15 18.05 2
72 | 35.65 18.15 2
73 | 35.45 18.7 2
74 | 36.05 18.75 2
75 | 36.25 18.2 2
76 | 36.6 18.7 2
77 | 37.1 18.5 2
78 | 36.75 18.1 2
79 | 37.65 18.3 2
80 | 37.15 17.85 2
81 | 37.65 17.75 2
82 | 38.05 18.1 2
83 | 38.45 17.7 2
84 | 38.8 17.3 2
85 | 38.2 17.25 2
86 | 38.6 16.8 2
87 | 38.25 16.35 2
88 | 37.9 16.85 2
89 | 37.5 17.3 2
90 | 37.65 16.4 2
91 | 37.15 16.7 2
92 | 37 17.15 2
93 | 36.6 17.4 2
94 | 36.15 17.55 2
95 | 35.75 17.65 2
96 | 36.6 16.9 2
97 | 36.05 16.95 2
98 | 35.45 17 2
99 | 35.3 17.55 2
100 | 34.9 17 2
101 | 34.75 17.45 2
102 | 34.3 17.35 2
103 | 34.3 16.8 2
104 | 33.9 17.2 2
105 | 33.35 17.05 2
106 | 32.85 16.95 2
107 | 33.55 16.6 2
108 | 34 16.4 2
109 | 32.45 17.2 2
110 | 32.1 16.85 2
111 | 31.7 16.65 2
112 | 31.2 16.35 2
113 | 30.95 15.75 2
114 | 31.15 15.35 2
115 | 31.45 15.1 2
116 | 31.75 14.7 2
117 | 32.15 14.35 2
118 | 32.65 14.15 2
119 | 33.15 14.05 2
120 | 33.8 13.9 2
121 | 34.35 14.2 2
122 | 34.3 14.85 2
123 | 34.05 15.35 2
124 | 33.9 15.95 2
125 | 33.35 16.05 2
126 | 33 16.5 2
127 | 32.45 16.6 2
128 | 31.95 16.25 2
129 | 31.5 15.85 2
130 | 31.75 15.4 2
131 | 32.15 15.8 2
132 | 32.55 16.1 2
133 | 32.9 15.7 2
134 | 32.55 15.4 2
135 | 32.05 15.2 2
136 | 32.5 14.8 2
137 | 33 15.25 2
138 | 33.5 15.6 2
139 | 33.6 15.05 2
140 | 32.9 14.7 2
141 | 33.3 14.5 2
142 | 33.8 14.5 2
143 | 9.2 22.35 3
144 | 10.9 22.35 3
145 | 12.45 22.3 3
146 | 13.95 22.05 3
147 | 14.65 20.3 3
148 | 13.15 20.8 3
149 | 11.6 20.95 3
150 | 10.25 21.25 3
151 | 9.2 20.8 3
152 | 8.05 21.55 3
153 | 7.15 19.9 3
154 | 8.55 20 3
155 | 8.5 19.2 3
156 | 7.35 18.3 3
157 | 8.25 16.65 3
158 | 8.95 18 3
159 | 9.6 18.85 3
160 | 9.65 19.75 3
161 | 10.2 20.25 3
162 | 10.9 20.3 3
163 | 12.15 20 3
164 | 11.25 19.75 3
165 | 10.8 19.6 3
166 | 10.4 19.55 3
167 | 10.65 19.35 3
168 | 10.3 19.15 3
169 | 10.95 19.1 3
170 | 10.6 18.85 3
171 | 10.05 18.1 3
172 | 10.35 16.9 3
173 | 10.05 15.9 3
174 | 11.15 18.1 3
175 | 12.1 18.75 3
176 | 13.2 19.2 3
177 | 11.5 17.1 3
178 | 12.65 17.65 3
179 | 14.45 18.35 4
180 | 13.9 16.7 3
181 | 12.6 15.8 3
182 | 15.95 20.75 4
183 | 16.95 21.6 4
184 | 17.9 21.95 4
185 | 19 22.7 4
186 | 20.45 22.75 4
187 | 19.1 21.7 4
188 | 20.4 21.4 4
189 | 21.95 21.9 4
190 | 18.65 20.7 4
191 | 17.75 20.55 4
192 | 17.05 19.85 4
193 | 15.75 19.45 4
194 | 15.75 18.25 4
195 | 16.35 16.9 4
196 | 17.2 15.9 4
197 | 17.9 17 4
198 | 17.3 17.75 4
199 | 17 18.9 4
200 | 17.8 18.65 4
201 | 17.85 19.5 4
202 | 18.5 19.9 4
203 | 19.1 19.95 4
204 | 19.55 20.55 4
205 | 20.1 19.9 4
206 | 19.55 19.3 4
207 | 18.95 19.3 4
208 | 18.55 19.2 4
209 | 18.45 18.85 4
210 | 18.85 18.9 4
211 | 19.2 18.8 4
212 | 18.75 18.55 4
213 | 18.3 18.1 4
214 | 19.1 17.8 4
215 | 19 16.75 4
216 | 18.75 15.5 4
217 | 19.65 18.2 4
218 | 20.1 18.95 4
219 | 21.25 20.4 4
220 | 21.45 19 4
221 | 20.9 17.9 4
222 | 20.25 17.2 4
223 | 20.1 15.4 4
224 | 21.4 15.95 4
225 | 22.2 17.15 4
226 | 11.4 12.55 5
227 | 12.05 12.75 5
228 | 12.7 13 5
229 | 13.35 13.05 5
230 | 14.2 12.95 5
231 | 15.05 12.95 5
232 | 15.6 12.95 5
233 | 16.1 13.1 5
234 | 15.95 12.6 5
235 | 15.4 12.45 5
236 | 14.65 12.4 5
237 | 13.85 12.4 5
238 | 13.15 12.2 5
239 | 12.65 12.4 5
240 | 11.9 12.1 5
241 | 12 11.5 5
242 | 12.65 11.65 5
243 | 13.4 11.65 5
244 | 14.1 11.7 5
245 | 14.6 11.8 5
246 | 15.2 11.95 5
247 | 15.05 11.55 5
248 | 14.45 11.2 5
249 | 13.95 10.9 5
250 | 13.05 11.1 5
251 | 13.55 10.65 5
252 | 12.45 10.9 5
253 | 13.2 10.25 5
254 | 11.25 11.1 5
255 | 11.25 11.85 5
256 | 10.7 12.25 5
257 | 10.05 11.85 5
258 | 10.6 11.6 5
259 | 9.75 11.35 5
260 | 10.4 10.9 5
261 | 9.75 10.6 5
262 | 9.75 9.8 5
263 | 10.35 10.2 5
264 | 10.9 10.4 5
265 | 11.7 10.55 5
266 | 12.4 10.1 5
267 | 12.9 9.7 5
268 | 12.35 9.65 5
269 | 11.85 10 5
270 | 11.15 9.8 5
271 | 10.65 9.55 5
272 | 10.1 9.25 5
273 | 10.75 9 5
274 | 11.1 9.3 5
275 | 11.7 9.4 5
276 | 12.15 9.1 5
277 | 12.85 9.05 5
278 | 12.45 8.7 5
279 | 11.95 8.25 5
280 | 11.7 8.85 5
281 | 11.3 8.5 5
282 | 11.55 7.95 5
283 | 12.9 8.5 5
284 | 13.25 8.05 5
285 | 12.65 7.95 5
286 | 12.1 7.6 5
287 | 11.65 7.35 5
288 | 12.2 7 5
289 | 11.8 6.65 5
290 | 12.65 7.3 5
291 | 13.2 7.55 5
292 | 13.65 7.75 5
293 | 14.35 7.55 5
294 | 13.8 7.3 5
295 | 13.35 6.85 5
296 | 12.7 6.7 5
297 | 12.45 6.25 5
298 | 13.2 5.85 5
299 | 13.65 6.25 5
300 | 14.1 6.75 5
301 | 14.7 6.9 5
302 | 15 7.5 5
303 | 15.85 7.3 5
304 | 15.35 7.05 5
305 | 15.1 6.35 5
306 | 14.45 6.3 5
307 | 14.75 5.75 5
308 | 13.95 5.8 5
309 | 15.5 5.9 5
310 | 15.8 6.4 5
311 | 16.05 6.85 5
312 | 16.55 7.1 5
313 | 16.7 6.5 5
314 | 16.25 6.1 5
315 | 17.05 6.25 5
316 | 15.85 11.55 5
317 | 15.9 12.1 5
318 | 16.3 11.65 5
319 | 16.55 12.05 5
320 | 16.5 12.6 5
321 | 16.75 13.1 5
322 | 17.5 13 5
323 | 17.15 12.65 5
324 | 17.1 12.1 5
325 | 16.9 11.7 5
326 | 17.4 11.65 5
327 | 17.55 12.1 5
328 | 17.75 12.65 5
329 | 18.3 12.75 5
330 | 18.25 12.25 5
331 | 18 11.95 5
332 | 17.85 11.5 5
333 | 18.3 11.65 5
334 | 18.6 12 5
335 | 18.85 12.45 5
336 | 19.1 11.8 5
337 | 18.85 11.45 5
338 | 18.5 11.15 5
339 | 18.95 10.8 5
340 | 19.3 11.15 5
341 | 19.4 10.7 5
342 | 19.25 10.35 5
343 | 19.9 10.6 5
344 | 19.65 10.15 5
345 | 19.45 9.75 5
346 | 19.9 9.45 5
347 | 20.3 10.05 5
348 | 20.65 10.35 5
349 | 21.25 10.1 5
350 | 20.9 9.9 5
351 | 21.65 9.65 5
352 | 21.15 9.35 5
353 | 20.5 9.4 5
354 | 19.5 9.2 5
355 | 19.95 8.85 5
356 | 20.65 8.8 5
357 | 21.2 8.7 5
358 | 21.9 8.85 5
359 | 21.75 8.25 5
360 | 21.65 7.8 5
361 | 21.05 8 5
362 | 20.3 8.2 5
363 | 19.4 8.7 5
364 | 19.6 8.05 5
365 | 18.95 8.1 5
366 | 20 7.6 5
367 | 20.55 7.55 5
368 | 21.25 7.25 5
369 | 20.85 6.85 5
370 | 20.25 7.05 5
371 | 19.55 7.05 5
372 | 19.05 7.45 5
373 | 18.35 7.6 5
374 | 17.85 7.3 5
375 | 18.3 7.1 5
376 | 18.95 6.85 5
377 | 19.6 6.25 5
378 | 20.15 6.45 5
379 | 18.8 6.25 5
380 | 18.35 6.55 5
381 | 17.65 6.55 5
382 | 17.25 6.9 5
383 | 17.95 6.2 5
384 | 17.45 9.85 6
385 | 17.2 9.25 6
386 | 17 9.6 6
387 | 17 10.05 6
388 | 16.45 10.1 6
389 | 16.5 9.8 6
390 | 16.6 9.45 6
391 | 16.6 9.05 6
392 | 15.9 9 6
393 | 16.05 9.35 6
394 | 16.05 9.65 6
395 | 15.85 9.95 6
396 | 15.35 9.9 6
397 | 15.6 9.45 6
398 | 15.3 9.15 6
399 | 15.1 9.55 6
400 |
--------------------------------------------------------------------------------
/Clustering Data/flame.txt:
--------------------------------------------------------------------------------
1 | 1.85 27.8 1
2 | 1.35 26.65 1
3 | 1.4 23.25 2
4 | 0.85 23.05 2
5 | 0.5 22.35 2
6 | 0.65 21.35 2
7 | 1.1 22.05 2
8 | 1.35 22.65 2
9 | 1.95 22.8 2
10 | 2.4 22.45 2
11 | 1.8 22 2
12 | 2.5 21.85 2
13 | 2.95 21.4 2
14 | 1.9 21.25 2
15 | 1.35 21.45 2
16 | 1.35 20.9 2
17 | 1.25 20.35 2
18 | 1.75 20.05 2
19 | 2 20.6 2
20 | 2.5 21 2
21 | 1.7 19.05 2
22 | 2.4 20.05 2
23 | 3.05 20.45 2
24 | 3.7 20.45 2
25 | 3.45 19.9 2
26 | 2.95 19.5 2
27 | 2.4 19.4 2
28 | 2.4 18.25 2
29 | 2.85 18.75 2
30 | 3.25 19.05 2
31 | 3.95 19.6 2
32 | 2.7 17.8 2
33 | 3.45 18.05 2
34 | 3.8 18.55 2
35 | 4 19.1 2
36 | 4.45 19.9 2
37 | 4.65 19.15 2
38 | 4.85 18.45 2
39 | 4.3 18.05 2
40 | 3.35 17.3 2
41 | 3.7 16.3 2
42 | 4.4 16.95 2
43 | 4.25 17.4 2
44 | 4.8 17.65 2
45 | 5.25 18.25 2
46 | 5.75 18.55 2
47 | 5.3 19.25 2
48 | 6.05 19.55 2
49 | 6.5 18.9 2
50 | 6.05 18.2 2
51 | 5.6 17.8 2
52 | 5.45 17.15 2
53 | 5.05 16.55 2
54 | 4.55 16.05 2
55 | 4.95 15.45 2
56 | 5.85 14.8 2
57 | 5.6 15.3 2
58 | 5.65 16 2
59 | 5.95 16.8 2
60 | 6.25 16.4 2
61 | 6.1 17.45 2
62 | 6.6 17.65 2
63 | 6.65 18.3 2
64 | 7.3 18.35 2
65 | 7.85 18.3 2
66 | 7.15 17.8 2
67 | 7.6 17.7 2
68 | 6.7 17.25 2
69 | 7.3 17.25 2
70 | 6.7 16.8 2
71 | 7.3 16.65 2
72 | 6.75 16.3 2
73 | 7.4 16.2 2
74 | 6.55 15.75 2
75 | 7.35 15.8 2
76 | 6.8 14.95 2
77 | 7.45 15.1 2
78 | 6.85 14.45 2
79 | 7.6 14.6 2
80 | 8.55 14.65 2
81 | 8.2 15.5 2
82 | 7.9 16.1 2
83 | 8.05 16.5 2
84 | 7.8 17 2
85 | 8 17.45 2
86 | 8.4 18.1 2
87 | 8.65 17.75 2
88 | 8.9 17.1 2
89 | 8.4 17.1 2
90 | 8.65 16.65 2
91 | 8.45 16.05 2
92 | 8.85 15.35 2
93 | 9.6 15.3 2
94 | 9.15 16 2
95 | 10.2 16 2
96 | 9.5 16.65 2
97 | 10.75 16.6 2
98 | 10.45 17.2 2
99 | 9.85 17.1 2
100 | 9.4 17.6 2
101 | 10.15 17.7 2
102 | 9.85 18.15 2
103 | 9.05 18.25 2
104 | 9.3 18.7 2
105 | 9.15 19.15 2
106 | 8.5 18.8 2
107 | 11.65 17.45 2
108 | 11.1 17.65 2
109 | 10.4 18.25 2
110 | 10 18.95 2
111 | 11.95 18.25 2
112 | 11.25 18.4 2
113 | 10.6 18.9 2
114 | 11.15 19 2
115 | 11.9 18.85 2
116 | 12.6 18.9 2
117 | 11.8 19.45 2
118 | 11.05 19.45 2
119 | 10.3 19.4 2
120 | 9.9 19.75 2
121 | 10.45 20 2
122 | 13.05 19.9 2
123 | 12.5 19.75 2
124 | 11.9 20.05 2
125 | 11.2 20.25 2
126 | 10.85 20.85 2
127 | 11.4 21.25 2
128 | 11.7 20.6 2
129 | 12.3 20.45 2
130 | 12.95 20.55 2
131 | 12.55 20.95 2
132 | 12.05 21.25 2
133 | 11.75 22.1 2
134 | 12.25 21.85 2
135 | 12.8 21.5 2
136 | 13.55 21 2
137 | 13.6 21.6 2
138 | 12.95 22 2
139 | 12.5 22.25 2
140 | 12.2 22.85 2
141 | 12.7 23.35 2
142 | 13 22.7 2
143 | 13.55 22.2 2
144 | 14.05 22.25 2
145 | 14.2 23.05 2
146 | 14.1 23.6 2
147 | 13.5 22.8 2
148 | 13.35 23.5 2
149 | 13.3 24 2
150 | 7.3 19.15 2
151 | 7.95 19.35 2
152 | 7.7 20.05 2
153 | 6.75 19.9 2
154 | 5.25 20.35 2
155 | 6.15 20.7 1
156 | 7 20.7 1
157 | 7.6 21.2 1
158 | 8.55 20.6 1
159 | 9.35 20.5 1
160 | 8.3 21.45 1
161 | 7.9 21.6 1
162 | 7.15 21.75 1
163 | 6.7 21.3 1
164 | 5.2 21.1 2
165 | 6.2 21.95 1
166 | 6.75 22.4 1
167 | 6.15 22.5 1
168 | 5.65 22.2 1
169 | 4.65 22.55 1
170 | 4.1 23.45 1
171 | 5.35 22.8 1
172 | 7.4 22.6 1
173 | 7.75 22.1 1
174 | 8.5 22.3 1
175 | 9.3 22 1
176 | 9.7 22.95 1
177 | 8.8 22.95 1
178 | 8.05 22.9 1
179 | 7.6 23.15 1
180 | 6.85 23 1
181 | 6.2 23.25 1
182 | 5.7 23.4 1
183 | 5.1 23.55 1
184 | 4.55 24.15 1
185 | 5.5 24 1
186 | 6.1 24.05 1
187 | 6.5 23.6 1
188 | 6.75 23.95 1
189 | 7.3 23.75 1
190 | 8.3 23.4 1
191 | 8.9 23.7 1
192 | 9.55 23.65 1
193 | 10.35 24.1 1
194 | 7.95 24.05 1
195 | 3.95 24.4 1
196 | 3.75 25.25 1
197 | 3.9 25.95 1
198 | 4.55 26.65 1
199 | 5.25 26.75 1
200 | 6.5 27.6 1
201 | 7.45 27.6 1
202 | 8.35 27.35 1
203 | 9.25 27.2 1
204 | 9.95 26.5 1
205 | 10.55 25.6 1
206 | 9.9 24.95 1
207 | 9.2 24.5 1
208 | 8.55 24.2 1
209 | 8.8 24.8 1
210 | 9.2 25.35 1
211 | 9.55 26.05 1
212 | 9.05 26.6 1
213 | 8.8 25.8 1
214 | 8.15 26.35 1
215 | 8.05 25.8 1
216 | 8.35 25.2 1
217 | 7.9 25.3 1
218 | 8.05 24.7 1
219 | 7.3 24.4 1
220 | 7.55 24.85 1
221 | 6.85 24.45 1
222 | 6.25 24.65 1
223 | 5.55 24.5 1
224 | 4.65 25.1 1
225 | 5 25.55 1
226 | 5.55 26.1 1
227 | 5.55 25.25 1
228 | 6.2 25.2 1
229 | 6.8 25.05 1
230 | 7.4 25.25 1
231 | 6.65 25.45 1
232 | 6.15 25.8 1
233 | 6.5 26.1 1
234 | 6.6 26.6 1
235 | 7.7 26.65 1
236 | 7.5 26.2 1
237 | 7.5 25.65 1
238 | 7.05 25.85 1
239 | 6.9 27.15 1
240 | 6.15 26.9 1
241 |
--------------------------------------------------------------------------------
/Clustering Data/jain.txt:
--------------------------------------------------------------------------------
1 | 0.85 17.45 2
2 | 0.75 15.6 2
3 | 3.3 15.45 2
4 | 5.25 14.2 2
5 | 4.9 15.65 2
6 | 5.35 15.85 2
7 | 5.1 17.9 2
8 | 4.6 18.25 2
9 | 4.05 18.75 2
10 | 3.4 19.7 2
11 | 2.9 21.15 2
12 | 3.1 21.85 2
13 | 3.9 21.85 2
14 | 4.4 20.05 2
15 | 7.2 14.5 2
16 | 7.65 16.5 2
17 | 7.1 18.65 2
18 | 7.05 19.9 2
19 | 5.85 20.55 2
20 | 5.5 21.8 2
21 | 6.55 21.8 2
22 | 6.05 22.3 2
23 | 5.2 23.4 2
24 | 4.55 23.9 2
25 | 5.1 24.4 2
26 | 8.1 26.35 2
27 | 10.15 27.7 2
28 | 9.75 25.5 2
29 | 9.2 21.1 2
30 | 11.2 22.8 2
31 | 12.6 23.1 2
32 | 13.25 23.5 2
33 | 11.65 26.85 2
34 | 12.45 27.55 2
35 | 13.3 27.85 2
36 | 13.7 27.75 2
37 | 14.15 26.9 2
38 | 14.05 26.55 2
39 | 15.15 24.2 2
40 | 15.2 24.75 2
41 | 12.2 20.9 2
42 | 12.15 21.45 2
43 | 12.75 22.05 2
44 | 13.15 21.85 2
45 | 13.75 22 2
46 | 13.95 22.7 2
47 | 14.4 22.65 2
48 | 14.2 22.15 2
49 | 14.1 21.75 2
50 | 14.05 21.4 2
51 | 17.2 24.8 2
52 | 17.7 24.85 2
53 | 17.55 25.2 2
54 | 17 26.85 2
55 | 16.55 27.1 2
56 | 19.15 25.35 2
57 | 18.8 24.7 2
58 | 21.4 25.85 2
59 | 15.8 21.35 2
60 | 16.6 21.15 2
61 | 17.45 20.75 2
62 | 18 20.95 2
63 | 18.25 20.2 2
64 | 18 22.3 2
65 | 18.6 22.25 2
66 | 19.2 21.95 2
67 | 19.45 22.1 2
68 | 20.1 21.6 2
69 | 20.1 20.9 2
70 | 19.9 20.35 2
71 | 19.45 19.05 2
72 | 19.25 18.7 2
73 | 21.3 22.3 2
74 | 22.9 23.65 2
75 | 23.15 24.1 2
76 | 24.25 22.85 2
77 | 22.05 20.25 2
78 | 20.95 18.25 2
79 | 21.65 17.25 2
80 | 21.55 16.7 2
81 | 21.6 16.3 2
82 | 21.5 15.5 2
83 | 22.4 16.5 2
84 | 22.25 18.1 2
85 | 23.15 19.05 2
86 | 23.5 19.8 2
87 | 23.75 20.2 2
88 | 25.15 19.8 2
89 | 25.5 19.45 2
90 | 23 18 2
91 | 23.95 17.75 2
92 | 25.9 17.55 2
93 | 27.65 15.65 2
94 | 23.1 14.6 2
95 | 23.5 15.2 2
96 | 24.05 14.9 2
97 | 24.5 14.7 2
98 | 14.15 17.35 1
99 | 14.3 16.8 1
100 | 14.3 15.75 1
101 | 14.75 15.1 1
102 | 15.35 15.5 1
103 | 15.95 16.45 1
104 | 16.5 17.05 1
105 | 17.35 17.05 1
106 | 17.15 16.3 1
107 | 16.65 16.1 1
108 | 16.5 15.15 1
109 | 16.25 14.95 1
110 | 16 14.25 1
111 | 15.9 13.2 1
112 | 15.15 12.05 1
113 | 15.2 11.7 1
114 | 17 15.65 1
115 | 16.9 15.35 1
116 | 17.35 15.45 1
117 | 17.15 15.1 1
118 | 17.3 14.9 1
119 | 17.7 15 1
120 | 17 14.6 1
121 | 16.85 14.3 1
122 | 16.6 14.05 1
123 | 17.1 14 1
124 | 17.45 14.15 1
125 | 17.8 14.2 1
126 | 17.6 13.85 1
127 | 17.2 13.5 1
128 | 17.25 13.15 1
129 | 17.1 12.75 1
130 | 16.95 12.35 1
131 | 16.5 12.2 1
132 | 16.25 12.5 1
133 | 16.05 11.9 1
134 | 16.65 10.9 1
135 | 16.7 11.4 1
136 | 16.95 11.25 1
137 | 17.3 11.2 1
138 | 18.05 11.9 1
139 | 18.6 12.5 1
140 | 18.9 12.05 1
141 | 18.7 11.25 1
142 | 17.95 10.9 1
143 | 18.4 10.05 1
144 | 17.45 10.4 1
145 | 17.6 10.15 1
146 | 17.7 9.85 1
147 | 17.3 9.7 1
148 | 16.95 9.7 1
149 | 16.75 9.65 1
150 | 19.8 9.95 1
151 | 19.1 9.55 1
152 | 17.5 8.3 1
153 | 17.55 8.1 1
154 | 17.85 7.55 1
155 | 18.2 8.35 1
156 | 19.3 9.1 1
157 | 19.4 8.85 1
158 | 19.05 8.85 1
159 | 18.9 8.5 1
160 | 18.6 7.85 1
161 | 18.7 7.65 1
162 | 19.35 8.2 1
163 | 19.95 8.3 1
164 | 20 8.9 1
165 | 20.3 8.9 1
166 | 20.55 8.8 1
167 | 18.35 6.95 1
168 | 18.65 6.9 1
169 | 19.3 7 1
170 | 19.1 6.85 1
171 | 19.15 6.65 1
172 | 21.2 8.8 1
173 | 21.4 8.8 1
174 | 21.1 8 1
175 | 20.4 7 1
176 | 20.5 6.35 1
177 | 20.1 6.05 1
178 | 20.45 5.15 1
179 | 20.95 5.55 1
180 | 20.95 6.2 1
181 | 20.9 6.6 1
182 | 21.05 7 1
183 | 21.85 8.5 1
184 | 21.9 8.2 1
185 | 22.3 7.7 1
186 | 21.85 6.65 1
187 | 21.3 5.05 1
188 | 22.6 6.7 1
189 | 22.5 6.15 1
190 | 23.65 7.2 1
191 | 24.1 7 1
192 | 21.95 4.8 1
193 | 22.15 5.05 1
194 | 22.45 5.3 1
195 | 22.45 4.9 1
196 | 22.7 5.5 1
197 | 23 5.6 1
198 | 23.2 5.3 1
199 | 23.45 5.95 1
200 | 23.75 5.95 1
201 | 24.45 6.15 1
202 | 24.6 6.45 1
203 | 25.2 6.55 1
204 | 26.05 6.4 1
205 | 25.3 5.75 1
206 | 24.35 5.35 1
207 | 23.3 4.9 1
208 | 22.95 4.75 1
209 | 22.4 4.55 1
210 | 22.8 4.1 1
211 | 22.9 4 1
212 | 23.25 3.85 1
213 | 23.45 3.6 1
214 | 23.55 4.2 1
215 | 23.8 3.65 1
216 | 23.8 4.75 1
217 | 24.2 4 1
218 | 24.55 4 1
219 | 24.7 3.85 1
220 | 24.7 4.3 1
221 | 24.9 4.75 1
222 | 26.4 5.7 1
223 | 27.15 5.95 1
224 | 27.3 5.45 1
225 | 27.5 5.45 1
226 | 27.55 5.1 1
227 | 26.85 4.95 1
228 | 26.6 4.9 1
229 | 26.85 4.4 1
230 | 26.2 4.4 1
231 | 26 4.25 1
232 | 25.15 4.1 1
233 | 25.6 3.9 1
234 | 25.85 3.6 1
235 | 24.95 3.35 1
236 | 25.1 3.25 1
237 | 25.45 3.15 1
238 | 26.85 2.95 1
239 | 27.15 3.15 1
240 | 27.2 3 1
241 | 27.95 3.25 1
242 | 27.95 3.5 1
243 | 28.8 4.05 1
244 | 28.8 4.7 1
245 | 28.75 5.45 1
246 | 28.6 5.75 1
247 | 29.25 6.3 1
248 | 30 6.55 1
249 | 30.6 3.4 1
250 | 30.05 3.45 1
251 | 29.75 3.45 1
252 | 29.2 4 1
253 | 29.45 4.05 1
254 | 29.05 4.55 1
255 | 29.4 4.85 1
256 | 29.5 4.7 1
257 | 29.9 4.45 1
258 | 30.75 4.45 1
259 | 30.4 4.05 1
260 | 30.8 3.95 1
261 | 31.05 3.95 1
262 | 30.9 5.2 1
263 | 30.65 5.85 1
264 | 30.7 6.15 1
265 | 31.5 6.25 1
266 | 31.65 6.55 1
267 | 32 7 1
268 | 32.5 7.95 1
269 | 33.35 7.45 1
270 | 32.6 6.95 1
271 | 32.65 6.6 1
272 | 32.55 6.35 1
273 | 32.35 6.1 1
274 | 32.55 5.8 1
275 | 32.2 5.05 1
276 | 32.35 4.25 1
277 | 32.9 4.15 1
278 | 32.7 4.6 1
279 | 32.75 4.85 1
280 | 34.1 4.6 1
281 | 34.1 5 1
282 | 33.6 5.25 1
283 | 33.35 5.65 1
284 | 33.75 5.95 1
285 | 33.4 6.2 1
286 | 34.45 5.8 1
287 | 34.65 5.65 1
288 | 34.65 6.25 1
289 | 35.25 6.25 1
290 | 34.35 6.8 1
291 | 34.1 7.15 1
292 | 34.45 7.3 1
293 | 34.7 7.2 1
294 | 34.85 7 1
295 | 34.35 7.75 1
296 | 34.55 7.85 1
297 | 35.05 8 1
298 | 35.5 8.05 1
299 | 35.8 7.1 1
300 | 36.6 6.7 1
301 | 36.75 7.25 1
302 | 36.5 7.4 1
303 | 35.95 7.9 1
304 | 36.1 8.1 1
305 | 36.15 8.4 1
306 | 37.6 7.35 1
307 | 37.9 7.65 1
308 | 29.15 4.4 1
309 | 34.9 9 1
310 | 35.3 9.4 1
311 | 35.9 9.35 1
312 | 36 9.65 1
313 | 35.75 10 1
314 | 36.7 9.15 1
315 | 36.6 9.8 1
316 | 36.9 9.75 1
317 | 37.25 10.15 1
318 | 36.4 10.15 1
319 | 36.3 10.7 1
320 | 36.75 10.85 1
321 | 38.15 9.7 1
322 | 38.4 9.45 1
323 | 38.35 10.5 1
324 | 37.7 10.8 1
325 | 37.45 11.15 1
326 | 37.35 11.4 1
327 | 37 11.75 1
328 | 36.8 12.2 1
329 | 37.15 12.55 1
330 | 37.25 12.15 1
331 | 37.65 11.95 1
332 | 37.95 11.85 1
333 | 38.6 11.75 1
334 | 38.5 12.2 1
335 | 38 12.95 1
336 | 37.3 13 1
337 | 37.5 13.4 1
338 | 37.85 14.5 1
339 | 38.3 14.6 1
340 | 38.05 14.45 1
341 | 38.35 14.35 1
342 | 38.5 14.25 1
343 | 39.3 14.2 1
344 | 39 13.2 1
345 | 38.95 12.9 1
346 | 39.2 12.35 1
347 | 39.5 11.8 1
348 | 39.55 12.3 1
349 | 39.75 12.75 1
350 | 40.2 12.8 1
351 | 40.4 12.05 1
352 | 40.45 12.5 1
353 | 40.55 13.15 1
354 | 40.45 14.5 1
355 | 40.2 14.8 1
356 | 40.65 14.9 1
357 | 40.6 15.25 1
358 | 41.3 15.3 1
359 | 40.95 15.7 1
360 | 41.25 16.8 1
361 | 40.95 17.05 1
362 | 40.7 16.45 1
363 | 40.45 16.3 1
364 | 39.9 16.2 1
365 | 39.65 16.2 1
366 | 39.25 15.5 1
367 | 38.85 15.5 1
368 | 38.3 16.5 1
369 | 38.75 16.85 1
370 | 39 16.6 1
371 | 38.25 17.35 1
372 | 39.5 16.95 1
373 | 39.9 17.05 1
374 |
--------------------------------------------------------------------------------
/Clustering Data/pathbased.txt:
--------------------------------------------------------------------------------
1 | 11.25 5.05 1
2 | 10.95 4.7 1
3 | 9.85 5.8 1
4 | 9.8 5.75 1
5 | 9.15 6.8 1
6 | 8.65 6.6 1
7 | 8.4 7.5 1
8 | 7.9 7.75 1
9 | 6.95 8.7 1
10 | 7.25 9.75 1
11 | 7.3 10.25 1
12 | 5.9 10.7 1
13 | 5.85 11.8 1
14 | 6.45 12.05 1
15 | 5.7 12.95 1
16 | 5.35 13.45 1
17 | 5.4 14.65 1
18 | 4.7 14.85 1
19 | 5.4 15.4 1
20 | 5.1 16.25 1
21 | 5.75 16.7 1
22 | 4.85 17.65 1
23 | 5 18 1
24 | 6.05 18 1
25 | 5.7 19.45 1
26 | 5.3 19.55 1
27 | 5.85 21.25 1
28 | 6.1 21.35 1
29 | 6.55 22.15 1
30 | 5.9 22.8 1
31 | 7.15 23.7 1
32 | 6.75 24.25 1
33 | 7.95 24.65 1
34 | 7.75 25.3 1
35 | 8.8 26.05 1
36 | 8.85 26.95 1
37 | 9.35 27.45 1
38 | 9.95 27.1 1
39 | 11.25 28.2 1
40 | 10.7 28.55 1
41 | 11.95 29.45 1
42 | 11.95 28.65 1
43 | 13.1 30.05 1
44 | 13.4 29.3 1
45 | 14.7 30.2 1
46 | 14.7 30.6 1
47 | 16.1 30.4 1
48 | 16.1 31.05 1
49 | 17.55 30.8 1
50 | 17.65 31.75 1
51 | 18.55 31.6 1
52 | 18.85 30.6 1
53 | 19.85 30.9 1
54 | 20.1 31.3 1
55 | 21.5 31.35 1
56 | 20.85 30.4 1
57 | 22.95 30.05 1
58 | 23.4 30.3 1
59 | 24.2 29.9 1
60 | 24.75 30 1
61 | 25.55 29.3 1
62 | 25.55 28.45 1
63 | 26.7 28.3 1
64 | 26.85 28.75 1
65 | 27.6 27.15 1
66 | 28.25 27.4 1
67 | 29.05 27 1
68 | 29.05 26.2 1
69 | 29.45 25.55 1
70 | 30.05 25.55 1
71 | 30.3 23.3 1
72 | 30.6 23.95 1
73 | 30.9 22.75 1
74 | 31 22.3 1
75 | 30.65 21.3 1
76 | 31.3 20.8 1
77 | 31.85 21.2 1
78 | 31.45 19.3 1
79 | 32.7 19.3 1
80 | 31.9 17.9 1
81 | 33.05 18.05 1
82 | 32.8 16.6 1
83 | 32.2 16.3 1
84 | 32.4 15.15 1
85 | 31.8 14.75 1
86 | 32.35 13.25 1
87 | 31.65 13.35 1
88 | 31.15 12.05 1
89 | 32 11.9 1
90 | 31.05 10.3 1
91 | 31.95 10.4 1
92 | 30.05 9.55 1
93 | 30.5 8.25 1
94 | 29.6 8.25 1
95 | 29.8 7.6 1
96 | 29 7.05 1
97 | 29 6.7 1
98 | 27.6 5.95 1
99 | 28.15 5.45 1
100 | 26.5 4.8 1
101 | 26.4 4.4 1
102 | 25.8 3.65 1
103 | 25.5 4.1 1
104 | 31.6 16.6 1
105 | 30.7 17.4 1
106 | 29.65 17.95 1
107 | 29.15 16.5 1
108 | 30.5 15.55 1
109 | 29.95 13.55 3
110 | 30 11.85 1
111 | 28.65 14.1 3
112 | 27.45 10.85 3
113 | 26.4 10.75 3
114 | 25.05 10.1 3
115 | 26.2 11.55 3
116 | 27.2 13.3 3
117 | 28.3 14.45 3
118 | 27.95 14.65 3
119 | 27.95 14.7 3
120 | 23.15 11 3
121 | 22.9 11.6 3
122 | 23.9 11.6 3
123 | 24.55 11.6 3
124 | 23.85 12.45 3
125 | 23.35 13.1 3
126 | 24.85 13.2 3
127 | 25.1 12.25 3
128 | 25.15 12.5 3
129 | 25.65 12.9 3
130 | 25.7 13.5 3
131 | 26.3 13.3 3
132 | 27.1 14.55 3
133 | 27.15 14.6 3
134 | 26.4 14.35 3
135 | 26.4 14.35 3
136 | 25.75 14.55 3
137 | 25.75 14.9 3
138 | 25.35 14.65 3
139 | 23.7 14.55 3
140 | 24.05 14.9 3
141 | 23.65 15.3 3
142 | 22.75 14.5 3
143 | 22 14 3
144 | 20.9 12.95 3
145 | 20.3 13.1 3
146 | 22.2 16.45 3
147 | 22.15 16.65 3
148 | 22.4 15.15 3
149 | 22.15 15.2 3
150 | 23.95 15.95 3
151 | 24.25 16.1 3
152 | 24.8 16.1 3
153 | 25.15 16.15 3
154 | 25.5 16.7 3
155 | 25.75 16.85 3
156 | 26.2 16.85 3
157 | 26.25 16.9 3
158 | 26.25 16.35 3
159 | 26.75 16.2 3
160 | 27.4 16.15 3
161 | 27.6 16.85 3
162 | 26.95 17.2 3
163 | 26.3 18.1 3
164 | 27.55 17.95 3
165 | 27.7 17.6 3
166 | 28.25 18.25 3
167 | 28.8 19.15 3
168 | 28.5 19.15 3
169 | 28.1 19.35 3
170 | 28.05 20.3 3
171 | 27.3 20.5 3
172 | 27.1 21.6 3
173 | 26.75 19.5 3
174 | 26.5 20 3
175 | 25.9 19.8 3
176 | 25.1 19.8 3
177 | 24.75 20.7 3
178 | 24.35 20.55 3
179 | 23.55 20.35 3
180 | 24.3 19.7 3
181 | 24.9 19 3
182 | 24.7 16.8 3
183 | 24.35 16.8 3
184 | 24.4 17.15 3
185 | 24.9 17.3 3
186 | 24.35 17.7 3
187 | 24.95 17.8 3
188 | 24.95 18.05 3
189 | 24.4 18.35 3
190 | 23.65 18.6 3
191 | 22.85 18.9 3
192 | 22.4 20.65 3
193 | 22.5 17.8 3
194 | 22.45 18.25 3
195 | 21.6 17.7 3
196 | 21.35 18.05 3
197 | 21.3 18.25 3
198 | 19.95 19.8 3
199 | 20.45 20.45 3
200 | 20.35 16.95 3
201 | 19.7 17.45 3
202 | 19.35 17.45 3
203 | 12.45 9.15 2
204 | 10.1 10.05 2
205 | 11.75 12.2 2
206 | 9.55 12.4 2
207 | 8.65 13.35 2
208 | 7.75 13.55 2
209 | 8.55 15.15 2
210 | 8.05 15.9 2
211 | 8.45 15.9 2
212 | 8.6 16.85 2
213 | 9 17.05 2
214 | 9 16.3 2
215 | 9.35 16.3 2
216 | 9.55 15.3 2
217 | 9.65 14.85 2
218 | 10.15 15.05 2
219 | 10.05 15.6 2
220 | 10.4 16 2
221 | 10.65 16 2
222 | 10.9 15.95 2
223 | 10.7 15.35 2
224 | 11.35 15.05 2
225 | 11.15 14.75 2
226 | 11.05 14.6 2
227 | 11.15 14.2 2
228 | 11.1 13.6 2
229 | 12.5 13 2
230 | 13.3 12.45 2
231 | 13.5 12.4 2
232 | 13.95 11.75 2
233 | 14.4 12.2 2
234 | 15.4 12.2 2
235 | 15.25 12.45 2
236 | 14.6 12.75 2
237 | 14.1 13.05 2
238 | 14.2 13.25 2
239 | 14.75 13.45 2
240 | 13.15 13.4 2
241 | 13.05 13.7 2
242 | 12.65 13.65 2
243 | 15.45 13.75 2
244 | 14.65 14.2 2
245 | 13.75 14.05 2
246 | 13.75 14.5 2
247 | 12.95 14.8 2
248 | 13.2 14.9 2
249 | 13.25 15.5 2
250 | 12.1 15.35 2
251 | 12.15 15.5 2
252 | 11.25 16.4 2
253 | 12.7 15.6 2
254 | 12.5 16.15 2
255 | 12.7 16.6 2
256 | 12.15 16.2 2
257 | 11.95 16.5 2
258 | 11.45 16.8 2
259 | 11.05 17.2 2
260 | 11.3 17.6 2
261 | 11.65 17.6 2
262 | 11.25 18.25 2
263 | 11.05 18.45 2
264 | 11.05 18.55 2
265 | 10.55 18.55 2
266 | 10.8 19.2 2
267 | 7.45 19 1
268 | 10.05 20.1 2
269 | 9.95 20.5 2
270 | 10.65 20.45 2
271 | 10.3 22.75 2
272 | 11.7 19.6 2
273 | 12.2 19.65 2
274 | 13.2 20.1 2
275 | 13.55 20.05 2
276 | 14.15 20.05 2
277 | 14.25 21.5 2
278 | 13.25 21.4 2
279 | 12.85 18.1 2
280 | 13.75 18.3 2
281 | 14.2 18.35 2
282 | 14.25 18.8 2
283 | 13.75 16 2
284 | 13.75 16.7 2
285 | 13.75 17.05 2
286 | 14.05 16.8 2
287 | 14.5 16.95 2
288 | 14.75 16.65 2
289 | 15.25 16.05 2
290 | 15.4 16.2 2
291 | 15.85 16.2 2
292 | 15.5 15.55 2
293 | 15 14.95 2
294 | 16.6 16.15 2
295 | 17.9 15.6 2
296 | 17.5 18.05 2
297 | 16.65 17.5 2
298 | 15.45 17.3 2
299 | 15.45 17.8 2
300 | 15.7 18.4 2
301 |
--------------------------------------------------------------------------------
/Clustering Data/spiral.txt:
--------------------------------------------------------------------------------
1 | 31.95 7.95 3
2 | 31.15 7.3 3
3 | 30.45 6.65 3
4 | 29.7 6 3
5 | 28.9 5.55 3
6 | 28.05 5 3
7 | 27.2 4.55 3
8 | 26.35 4.15 3
9 | 25.4 3.85 3
10 | 24.6 3.6 3
11 | 23.6 3.3 3
12 | 22.75 3.15 3
13 | 21.85 3.05 3
14 | 20.9 3 3
15 | 20 2.9 3
16 | 19.1 3 3
17 | 18.2 3.2 3
18 | 17.3 3.25 3
19 | 16.55 3.5 3
20 | 15.7 3.7 3
21 | 14.85 4.1 3
22 | 14.15 4.4 3
23 | 13.4 4.75 3
24 | 12.7 5.2 3
25 | 12.05 5.65 3
26 | 11.45 6.15 3
27 | 10.9 6.65 3
28 | 10.3 7.25 3
29 | 9.7 7.85 3
30 | 9.35 8.35 3
31 | 8.9 9.05 3
32 | 8.55 9.65 3
33 | 8.15 10.35 3
34 | 7.95 10.95 3
35 | 7.75 11.7 3
36 | 7.55 12.35 3
37 | 7.45 13 3
38 | 7.35 13.75 3
39 | 7.3 14.35 3
40 | 7.35 14.95 3
41 | 7.35 15.75 3
42 | 7.55 16.35 3
43 | 7.7 16.95 3
44 | 7.8 17.55 3
45 | 8.05 18.15 3
46 | 8.3 18.75 3
47 | 8.65 19.3 3
48 | 8.9 19.85 3
49 | 9.3 20.3 3
50 | 9.65 20.8 3
51 | 10.2 21.25 3
52 | 10.6 21.65 3
53 | 11.1 22.15 3
54 | 11.55 22.45 3
55 | 11.95 22.7 3
56 | 12.55 23 3
57 | 13.05 23.2 3
58 | 13.45 23.4 3
59 | 14 23.55 3
60 | 14.55 23.6 3
61 | 15.1 23.75 3
62 | 15.7 23.75 3
63 | 16.15 23.85 3
64 | 16.7 23.8 3
65 | 17.15 23.75 3
66 | 17.75 23.75 3
67 | 18.2 23.6 3
68 | 18.65 23.5 3
69 | 19.1 23.35 3
70 | 19.6 23.15 3
71 | 20 22.95 3
72 | 20.4 22.7 3
73 | 20.7 22.55 3
74 | 21 22.15 3
75 | 21.45 21.95 3
76 | 21.75 21.55 3
77 | 22 21.25 3
78 | 22.25 21 3
79 | 22.5 20.7 3
80 | 22.65 20.35 3
81 | 22.75 20.05 3
82 | 22.9 19.65 3
83 | 23 19.35 3
84 | 23.1 19 3
85 | 23.15 18.65 3
86 | 23.2 18.25 3
87 | 23.2 18.05 3
88 | 23.2 17.8 3
89 | 23.1 17.45 3
90 | 23.05 17.15 3
91 | 22.9 16.9 3
92 | 22.85 16.6 3
93 | 22.7 16.4 3
94 | 22.6 16.2 3
95 | 22.55 16.05 3
96 | 22.4 15.95 3
97 | 22.35 15.8 3
98 | 22.2 15.65 3
99 | 22.15 15.55 3
100 | 22 15.4 3
101 | 21.9 15.3 3
102 | 21.85 15.25 3
103 | 21.75 15.15 3
104 | 21.65 15.05 3
105 | 21.55 15 3
106 | 21.5 14.9 3
107 | 19.35 31.65 1
108 | 20.35 31.45 1
109 | 21.35 31.1 1
110 | 22.25 30.9 1
111 | 23.2 30.45 1
112 | 23.95 30.05 1
113 | 24.9 29.65 1
114 | 25.6 29.05 1
115 | 26.35 28.5 1
116 | 27.15 27.9 1
117 | 27.75 27.35 1
118 | 28.3 26.6 1
119 | 28.95 25.85 1
120 | 29.5 25.15 1
121 | 29.95 24.45 1
122 | 30.4 23.7 1
123 | 30.6 22.9 1
124 | 30.9 22.1 1
125 | 31.25 21.3 1
126 | 31.35 20.55 1
127 | 31.5 19.7 1
128 | 31.55 18.9 1
129 | 31.65 18.15 1
130 | 31.6 17.35 1
131 | 31.45 16.55 1
132 | 31.3 15.8 1
133 | 31.15 15.05 1
134 | 30.9 14.35 1
135 | 30.6 13.65 1
136 | 30.3 13 1
137 | 29.9 12.3 1
138 | 29.5 11.75 1
139 | 29 11.15 1
140 | 28.5 10.6 1
141 | 28 10.1 1
142 | 27.55 9.65 1
143 | 26.9 9.1 1
144 | 26.25 8.8 1
145 | 25.7 8.4 1
146 | 25.15 8.05 1
147 | 24.5 7.75 1
148 | 23.9 7.65 1
149 | 23.15 7.4 1
150 | 22.5 7.3 1
151 | 21.9 7.1 1
152 | 21.25 7.05 1
153 | 20.5 7 1
154 | 19.9 6.95 1
155 | 19.25 7.05 1
156 | 18.75 7.1 1
157 | 18.05 7.25 1
158 | 17.5 7.35 1
159 | 16.9 7.6 1
160 | 16.35 7.8 1
161 | 15.8 8.05 1
162 | 15.4 8.35 1
163 | 14.9 8.7 1
164 | 14.45 8.9 1
165 | 13.95 9.3 1
166 | 13.6 9.65 1
167 | 13.25 10.1 1
168 | 12.95 10.55 1
169 | 12.65 10.9 1
170 | 12.35 11.4 1
171 | 12.2 11.75 1
172 | 11.95 12.2 1
173 | 11.8 12.65 1
174 | 11.75 13.05 1
175 | 11.55 13.6 1
176 | 11.55 14 1
177 | 11.55 14.35 1
178 | 11.55 14.7 1
179 | 11.6 15.25 1
180 | 11.65 15.7 1
181 | 11.8 16.05 1
182 | 11.85 16.5 1
183 | 12 16.75 1
184 | 12.15 17.2 1
185 | 12.3 17.6 1
186 | 12.55 17.85 1
187 | 12.8 18.05 1
188 | 13.1 18.4 1
189 | 13.3 18.6 1
190 | 13.55 18.85 1
191 | 13.8 19.05 1
192 | 14.15 19.25 1
193 | 14.45 19.5 1
194 | 14.85 19.55 1
195 | 15 19.7 1
196 | 15.25 19.7 1
197 | 15.55 19.85 1
198 | 15.95 19.9 1
199 | 16.2 19.9 1
200 | 16.55 19.9 1
201 | 16.85 19.9 1
202 | 17.2 19.9 1
203 | 17.4 19.8 1
204 | 17.65 19.75 1
205 | 17.8 19.7 1
206 | 18 19.6 1
207 | 18.2 19.55 1
208 | 3.9 9.6 2
209 | 3.55 10.65 2
210 | 3.35 11.4 2
211 | 3.1 12.35 2
212 | 3.1 13.25 2
213 | 3.05 14.15 2
214 | 3 15.1 2
215 | 3.1 16 2
216 | 3.2 16.85 2
217 | 3.45 17.75 2
218 | 3.7 18.7 2
219 | 3.95 19.55 2
220 | 4.35 20.25 2
221 | 4.7 21.1 2
222 | 5.15 21.8 2
223 | 5.6 22.5 2
224 | 6.2 23.3 2
225 | 6.8 23.85 2
226 | 7.35 24.45 2
227 | 8.05 24.95 2
228 | 8.8 25.45 2
229 | 9.5 26 2
230 | 10.2 26.35 2
231 | 10.9 26.75 2
232 | 11.7 27 2
233 | 12.45 27.25 2
234 | 13.3 27.6 2
235 | 14.05 27.6 2
236 | 14.7 27.75 2
237 | 15.55 27.75 2
238 | 16.4 27.75 2
239 | 17.1 27.75 2
240 | 17.9 27.75 2
241 | 18.55 27.7 2
242 | 19.35 27.6 2
243 | 20.1 27.35 2
244 | 20.7 27.1 2
245 | 21.45 26.8 2
246 | 22.05 26.5 2
247 | 22.7 26.15 2
248 | 23.35 25.65 2
249 | 23.8 25.3 2
250 | 24.3 24.85 2
251 | 24.75 24.35 2
252 | 25.25 23.95 2
253 | 25.65 23.45 2
254 | 26.05 23 2
255 | 26.2 22.3 2
256 | 26.6 21.8 2
257 | 26.75 21.25 2
258 | 27 20.7 2
259 | 27.15 20.15 2
260 | 27.15 19.6 2
261 | 27.35 19.1 2
262 | 27.35 18.45 2
263 | 27.4 18 2
264 | 27.3 17.4 2
265 | 27.15 16.9 2
266 | 27 16.4 2
267 | 27 15.9 2
268 | 26.75 15.35 2
269 | 26.55 14.85 2
270 | 26.3 14.45 2
271 | 25.95 14.1 2
272 | 25.75 13.7 2
273 | 25.35 13.3 2
274 | 25.05 12.95 2
275 | 24.8 12.7 2
276 | 24.4 12.45 2
277 | 24.05 12.2 2
278 | 23.55 11.85 2
279 | 23.2 11.65 2
280 | 22.75 11.4 2
281 | 22.3 11.3 2
282 | 21.9 11.1 2
283 | 21.45 11.05 2
284 | 21.1 11 2
285 | 20.7 10.95 2
286 | 20.35 10.95 2
287 | 19.95 11 2
288 | 19.55 11 2
289 | 19.15 11.05 2
290 | 18.85 11.1 2
291 | 18.45 11.25 2
292 | 18.15 11.35 2
293 | 17.85 11.5 2
294 | 17.5 11.7 2
295 | 17.2 11.95 2
296 | 17 12.05 2
297 | 16.75 12.2 2
298 | 16.65 12.35 2
299 | 16.5 12.5 2
300 | 16.35 12.7 2
301 | 16.2 12.8 2
302 | 16.15 12.95 2
303 | 16 13.1 2
304 | 15.95 13.25 2
305 | 15.9 13.4 2
306 | 15.8 13.5 2
307 | 15.8 13.65 2
308 | 15.75 13.85 2
309 | 15.65 14.05 2
310 | 15.65 14.25 2
311 | 15.65 14.5 2
312 | 15.65 14.6 2
313 |
--------------------------------------------------------------------------------
/Naive data.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 15,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from numpy import genfromtxt\n",
12 | "import glob\n",
13 | "\n",
14 | "cluster_files = glob.glob(\"./Clustering Data/*\")\n",
15 | "\n",
16 | "clusters = []\n",
17 | "for i in cluster_files:\n",
18 | " clusters.append(genfromtxt(i))"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {
25 | "collapsed": false
26 | },
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | "Processed for 0samples\n",
33 | "Processed for 100samples\n",
34 | "Processed for 200samples"
35 | ]
36 | }
37 | ],
38 | "source": [
39 | "from dsc import DeepSubspaceClustering\n",
40 | "from sklearn.datasets import load_boston\n",
41 | "\n",
42 | "digits = load_boston().data\n",
43 | "dsc = DeepSubspaceClustering(digits, hidden_dims=[30], lambda1=0, lambda2=0, learning_rate=0.1)\n",
44 | "dsc.train(batch_size=400, epochs=500, print_step=10)\n",
45 | "\n",
46 | "result = dsc.result"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 25,
52 | "metadata": {
53 | "collapsed": false
54 | },
55 | "outputs": [
56 | {
57 | "data": {
58 | "text/plain": [
59 | "(1797, 64)"
60 | ]
61 | },
62 | "execution_count": 25,
63 | "metadata": {},
64 | "output_type": "execute_result"
65 | }
66 | ],
67 | "source": []
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 16,
72 | "metadata": {
73 | "collapsed": false
74 | },
75 | "outputs": [
76 | {
77 | "data": {
78 | "text/plain": [
79 | "(788, 2)"
80 | ]
81 | },
82 | "execution_count": 16,
83 | "metadata": {},
84 | "output_type": "execute_result"
85 | }
86 | ],
87 | "source": [
88 | "clusters[0][:,[0,1]].shape"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 18,
94 | "metadata": {
95 | "collapsed": false
96 | },
97 | "outputs": [
98 | {
99 | "data": {
100 | "text/plain": [
101 | "array([[ 15.55, 28.65, 2. ],\n",
102 | " [ 14.9 , 27.55, 2. ],\n",
103 | " [ 14.45, 28.35, 2. ],\n",
104 | " ..., \n",
105 | " [ 8.5 , 3.25, 5. ],\n",
106 | " [ 8.1 , 3.55, 5. ],\n",
107 | " [ 8.15, 4. , 5. ]])"
108 | ]
109 | },
110 | "execution_count": 18,
111 | "metadata": {},
112 | "output_type": "execute_result"
113 | }
114 | ],
115 | "source": [
116 | "clusters[0]"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": 11,
122 | "metadata": {
123 | "collapsed": false
124 | },
125 | "outputs": [
126 | {
127 | "data": {
128 | "text/html": [
129 | ""
130 | ],
131 | "text/plain": [
132 | ""
133 | ]
134 | },
135 | "execution_count": 11,
136 | "metadata": {},
137 | "output_type": "execute_result"
138 | }
139 | ],
140 | "source": [
141 | "import plotly.plotly as py\n",
142 | "import plotly.graph_objs as go\n",
143 | "\n",
144 | "# Create random data with numpy\n",
145 | "import numpy as np\n",
146 | "\n",
147 | "N = 1000\n",
148 | "\n",
149 | "cluster = clusters[5]\n",
150 | "# Create a trace\n",
151 | "trace = go.Scatter(\n",
152 | " x = cluster[:,0],\n",
153 | " y = cluster[:,1],\n",
154 | " mode = 'markers'\n",
155 | ")\n",
156 | "\n",
157 | "data = [trace]\n",
158 | "\n",
159 | "# Plot and embed in ipython notebook!\n",
160 | "py.iplot(data, filename='basic-scatter')"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": null,
166 | "metadata": {
167 | "collapsed": true
168 | },
169 | "outputs": [],
170 | "source": [
171 | "Deep"
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": 12,
177 | "metadata": {
178 | "collapsed": true
179 | },
180 | "outputs": [],
181 | "source": [
182 | "import plotly.plotly as py\n",
183 | "import plotly.graph_objs as go\n",
184 | "import pandas as pd\n",
185 | "df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv')\n",
186 | "df.head()\n",
187 | "\n",
188 | "data = []\n",
189 | "clusters = []\n",
190 | "colors = ['rgb(228,26,28)','rgb(55,126,184)','rgb(77,175,74)']\n",
191 | "\n",
192 | "for i in range(len(df['Name'].unique())):\n",
193 | " name = df['Name'].unique()[i]\n",
194 | " color = colors[i]\n",
195 | " x = df[ df['Name'] == name ]['SepalLength']\n",
196 | " y = df[ df['Name'] == name ]['SepalWidth']\n",
197 | " z = df[ df['Name'] == name ]['PetalLength']\n",
198 | " \n",
199 | " trace = dict(\n",
200 | " name = name,\n",
201 | " x = x, y = y, z = z,\n",
202 | " type = \"scatter3d\", \n",
203 | " mode = 'markers',\n",
204 | " marker = dict( size=3, color=color, line=dict(width=0) ) )\n",
205 | " data.append( trace )\n",
206 | " \n",
207 | " cluster = dict(\n",
208 | " color = color,\n",
209 | " opacity = 0.3,\n",
210 | " type = \"mesh3d\", \n",
211 | " x = x, y = y, z = z )\n",
212 | " data.append( cluster )\n",
213 | "\n",
214 | "layout = dict(\n",
215 | " width=800,\n",
216 | " height=550,\n",
217 | " autosize=False,\n",
218 | " title='Iris dataset',\n",
219 | " scene=dict(\n",
220 | " xaxis=dict(\n",
221 | " gridcolor='rgb(255, 255, 255)',\n",
222 | " zerolinecolor='rgb(255, 255, 255)',\n",
223 | " showbackground=True,\n",
224 | " backgroundcolor='rgb(230, 230,230)'\n",
225 | " ),\n",
226 | " yaxis=dict(\n",
227 | " gridcolor='rgb(255, 255, 255)',\n",
228 | " zerolinecolor='rgb(255, 255, 255)',\n",
229 | " showbackground=True,\n",
230 | " backgroundcolor='rgb(230, 230,230)'\n",
231 | " ),\n",
232 | " zaxis=dict(\n",
233 | " gridcolor='rgb(255, 255, 255)',\n",
234 | " zerolinecolor='rgb(255, 255, 255)',\n",
235 | " showbackground=True,\n",
236 | " backgroundcolor='rgb(230, 230,230)'\n",
237 | " ),\n",
238 | " aspectratio = dict( x=1, y=1, z=0.7 ),\n",
239 | " aspectmode = 'manual' \n",
240 | " ),\n",
241 | ")\n",
242 | "\n",
243 | "fig = dict(data=data, layout=layout)\n",
244 | "\n",
245 | "# IPython notebook\n",
246 | "# py.iplot(fig, filename='pandas-3d-scatter-iris', validate=False)\n",
247 | "\n",
248 | "url = py.plot(fig, filename='pandas-3d-scatter-iris', validate=False)"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": null,
254 | "metadata": {
255 | "collapsed": true
256 | },
257 | "outputs": [],
258 | "source": []
259 | }
260 | ],
261 | "metadata": {
262 | "kernelspec": {
263 | "display_name": "Python 2",
264 | "language": "python",
265 | "name": "python2"
266 | },
267 | "language_info": {
268 | "codemirror_mode": {
269 | "name": "ipython",
270 | "version": 2
271 | },
272 | "file_extension": ".py",
273 | "mimetype": "text/x-python",
274 | "name": "python",
275 | "nbconvert_exporter": "python",
276 | "pygments_lexer": "ipython2",
277 | "version": "2.7.11"
278 | }
279 | },
280 | "nbformat": 4,
281 | "nbformat_minor": 0
282 | }
283 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep-Subspace-Clustering
2 |
3 | Implementation based on the paper published at IJCAI-16:
4 | Deep Subspace Clustering with Sparsity Prior
5 | http://www.ijcai.org/Proceedings/16/Papers/275.pdf
6 |
--------------------------------------------------------------------------------
/SSC_1.0/BuildAdjacency.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % This function takes a NxN coefficient matrix and returns a NxN adjacency
3 | % matrix by choosing only the K strongest connections in the similarity
4 | % graph
5 | % CMat: NxN coefficient matrix
6 | % K: number of strongest edges to keep; if K=0 use all the coefficients
7 | % CKSym: NxN symmetric adjacency matrix
8 | %--------------------------------------------------------------------------
9 | % Copyright @ Ehsan Elhamifar, 2010
10 | %--------------------------------------------------------------------------
11 |
12 |
13 | function CKSym = BuildAdjacency(CMat,K)
14 |
15 | N = size(CMat,1); % number of nodes in the similarity graph
16 | CAbs = abs(CMat); % edge strength is the coefficient magnitude
17 | for i = 1:N
18 | c = CAbs(:,i);
19 | [PSrt,PInd] = sort(c,'descend'); % PInd(1) locates the largest entry of column i (PSrt itself is unused)
20 | CAbs(:,i) = CAbs(:,i) ./ abs( c(PInd(1)) ); % scale column i so its strongest connection equals 1
21 | end
22 |
23 | CSym = CAbs + CAbs'; % symmetrize the normalized similarity graph
24 |
25 | if (K ~= 0)
26 | [Srt,Ind] = sort( CSym,1,'descend' ); % per-column sort, strongest edges first (Srt is unused)
27 | CK = zeros(N,N);
28 | for i = 1:N
29 | for j = 1:K
30 | CK( Ind(j,i),i ) = CSym( Ind(j,i),i ) ./ CSym( Ind(1,i),i ); % keep only the K strongest edges, renormalized by the column maximum
31 | end
32 | end
33 | CKSym = CK + CK'; % re-symmetrize after the truncation
34 | else
35 | CKSym = CSym; % K = 0: keep the full graph
36 | end
--------------------------------------------------------------------------------
/SSC_1.0/DataProjection.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % This function takes the D x N data matrix with columns indicating
3 | % different data points and projects the D dimensional data into the r
4 | % dimensional space. Different types of projections are possible:
5 | % (1) Projection using PCA
6 | % (2) Projection using random projections with iid elements from N(0,1/r)
7 | % (3) Projection using random projections with iid elements from symmetric
8 | % bernoulli distribution: +1/sqrt(r),-1/sqrt(r) elements with same probability
9 | % X: D x N data matrix of N data points
10 | % r: dimension of the space to project the data to
11 | % type: type of projection, {'PCA','NormalProj','BernoulliProj'}
12 | % Xp: r x N data matrix of N projected data points
13 | %--------------------------------------------------------------------------
14 | % Copyright @ Ehsan Elhamifar, 2010
15 | %--------------------------------------------------------------------------
16 |
17 |
18 | function Xp = DataProjection(X,r,type)
19 |
20 | if r == 0 % r = 0: skip the projection and return the data unchanged
21 | Xp = X;
22 | else
23 | if (nargin < 3)
24 | type = 'NormalProj'; % default projection type
25 | end
26 | D = size(X,1); % ambient dimension
27 |
28 | if ( strcmp(type , 'PCA') )
29 |
30 | [U,S,V] = svd(X',0); % economy-size SVD of the transposed data
31 | Xp = U(:,1:r)'; % NOTE(review): rows of Xp are the unit-norm PCA coordinates (scores) of the points
32 |
33 | elseif ( strcmp(type , 'NormalProj') )
34 |
35 | np = normrnd(0,1/sqrt(r),r*D,1); % iid N(0,1/r) entries (normrnd's second argument is the std dev)
36 | PrN = reshape(np,r,D); % r x D random projection matrix
37 | Xp = PrN * X;
38 |
39 | elseif( strcmp(type , 'BernoulliProj') )
40 |
41 | bp = rand(r*D,1);
42 | Bp = 1/sqrt(r) .* (bp >= .5) - 1/sqrt(r) .* (bp < .5); % +1/sqrt(r) or -1/sqrt(r), each with probability 1/2
43 | PrB = reshape(Bp,r,D); % r x D random projection matrix
44 | Xp = PrB * X;
45 |
46 | end
47 | end
--------------------------------------------------------------------------------
/SSC_1.0/Misclassification.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % This function takes the groups resulting from spectral clustering and the
3 | % ground truth to compute the misclassification rate.
4 | % groups: [grp1,grp2,grp3] for three different forms of Spectral Clustering
5 | % s: ground truth vector
6 | % Missrate: 3x1 vector with misclassification rates of three forms of
7 | % spectral clustering
8 | %--------------------------------------------------------------------------
9 | % Copyright @ Ehsan Elhamifar, 2010
10 | %--------------------------------------------------------------------------
11 |
12 |
13 | function Missrate = Misclassification(groups,s)
14 |
15 | n = max(s); % number of groups in the ground truth
16 | for k = 1:3 % one misclassification rate per spectral-clustering variant
17 | Missrate(k,1) = missclassGroups(groups(:,k), s, n) / length(s); % best-permutation error count, normalized by the number of points
18 | end
--------------------------------------------------------------------------------
/SSC_1.0/OutlierDetection.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % This function takes the coefficient matrix resulted from sparse
3 | % representation using \ell_1 minimization. If a point cannot be written as
4 | % a linear combination of other points, it should be an outlier. The
5 | % function detects the indices of outliers and modifies the coefficient
6 | % matrix and the ground-truth accordingly.
7 | % CMat: NxN coefficient matrix
8 | % s: Nx1 ground-truth vector
9 | % CMatC: coefficient matrix after eliminating NaNs
10 | % sc: ground-truth after eliminating outliers
11 | % OutlierIndx: indices of outliers in {1,2,...,N}
12 | % Fail: 1 if number of inliers is less than number of groups, 0 otherwise
13 | %--------------------------------------------------------------------------
14 | % Copyright @ Ehsan Elhamifar, 2010
15 | %--------------------------------------------------------------------------
16 |
17 |
18 | function [CMatC,sc,OutlierIndx,Fail] = OutlierDetection(CMat,s)
19 |
20 | n = max(s); % number of groups in the ground truth
21 | N = size(CMat,2); % number of data points
22 | NanIndx = []; % indices of columns containing NaNs (the outliers)
23 | FailCnt = 0; % number of outliers detected
24 | Fail = 0;
25 |
26 | for i = 1:N
27 | c = CMat(:,i);
28 | if( sum(isnan(c)) >= 1 ) % any NaN in column i means sparse recovery failed for point i
29 | NanIndx = [NanIndx ; i];
30 | FailCnt = FailCnt + 1;
31 | end
32 | end
33 |
34 | sc = s;
35 | sc(NanIndx) = []; % drop outliers from the ground truth
36 | CMatC = CMat;
37 | CMatC(NanIndx,:) = []; % remove outlier rows ...
38 | CMatC(:,NanIndx) = []; % ... and the corresponding columns
39 | OutlierIndx = NanIndx;
40 |
41 | if ( FailCnt > N - n ) % fewer inliers remain than groups: clustering cannot proceed
42 | CMatC = [];
43 | sc = [];
44 | Fail = 1;
45 | end
--------------------------------------------------------------------------------
/SSC_1.0/Readme.tex:
--------------------------------------------------------------------------------
1 | Please run SSC.m to see the results.
2 | You can refer to the comments in the beginning of each m-file for more information.
--------------------------------------------------------------------------------
/SSC_1.0/SSC.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % This is the main function for running SSC.
3 | % Load the DxN matrix X representing N data points in the D dim. space
4 | % living in a union of n low-dim. subspaces.
5 | % The projection step onto the r-dimensional space is arbitrary and can
6 | % be skipped. In the case of using projection there are different types of
7 | % projections possible: 'NormalProj', 'BernoulliProj', 'PCA'. Please refer
8 | % to DataProjection.m for more information.
9 | %--------------------------------------------------------------------------
10 | % X: DxN matrix of N points in D-dim. space living in n low-dim. subspaces
11 | % s: groundtruth for the segmentation
12 | % n: number of subspaces
13 | % r: dimension of the projection e.g. r = d*n (d: max subspace dim.)
14 | % Cst: 1 if using the constraint sum(c)=1 in Lasso, else 0
15 | % OptM: optimization method {'L1Perfect','L1Noisy','Lasso','L1ED'}, see
16 | % SparseCoefRecovery.m for more information
17 | % lambda: regularization parameter for 'Lasso' typically in [0.001,0.01]
18 | % or the noise level for 'L1Noisy'. See SparseCoefRecovery.m for more
19 | % information.
20 | % K: number of largest coefficients to pick in order to build the
21 | % similarity graph, typically K = max{subspace dimensions}
22 | % Missrate: vector of misclassification rates
23 | %--------------------------------------------------------------------------
24 | % In order to run the code CVX package must be installed in Matlab. It can
25 | % be downloaded from http://cvxr.com/cvx/download
26 | %--------------------------------------------------------------------------
27 | % Copyright @ Ehsan Elhamifar, 2010
28 | %--------------------------------------------------------------------------
29 |
30 | clc, clear all, close all
31 | D = 30; %Dimension of ambient space
32 | n = 2; %Number of subspaces
33 | d1 = 1; d2 = 1; %d1 and d2: dimension of subspace 1 and 2
34 | N1 = 20; N2 = 20; %N1 and N2: number of points in subspace 1 and 2
35 | X1 = randn(D,d1) * randn(d1,N1); %Generating N1 points in a d1 dim. subspace
36 | X2 = randn(D,d2) * randn(d2,N2); %Generating N2 points in a d2 dim. subspace
37 | X = [X1 X2];
38 | s = [1*ones(1,N1) 2*ones(1,N2)]; %Generating the ground-truth for evaluating clustering results
39 | r = 0; %Enter the projection dimension e.g. r = d*n, enter r = 0 to not project
40 | Cst = 0; %Enter 1 to use the additional affine constraint sum(c) == 1
41 | OptM = 'Lasso'; %OptM can be {'L1Perfect','L1Noisy','Lasso','L1ED'}
42 | lambda = 0.001; %Regularization parameter in 'Lasso' or the noise level for 'L1Noisy'
43 | K = max(d1,d2); %Number of top coefficients to build the similarity graph, enter K=0 for using the whole coefficients
44 | if Cst == 1
45 | K = max(d1,d2) + 1; %For affine subspaces, the number of coefficients to pick is dimension + 1
46 | end
47 |
48 | Xp = DataProjection(X,r,'NormalProj'); %Optional dimensionality reduction (skipped here since r = 0)
49 | CMat = SparseCoefRecovery(Xp,Cst,OptM,lambda); %Express each point as a sparse combination of the others
50 | [CMatC,sc,OutlierIndx,Fail] = OutlierDetection(CMat,s); %Drop points whose recovery produced NaNs
51 | if (Fail == 0)
52 | CKSym = BuildAdjacency(CMatC,K); %Symmetric similarity graph from the K strongest coefficients
53 | [Grps , SingVals, LapKernel] = SpectralClustering(CKSym,n); %Segment the graph into n groups (three Laplacian variants)
54 | Missrate = Misclassification(Grps,sc); %Compare each variant against the ground truth
55 | save Lasso_001.mat CMat CKSym Missrate SingVals LapKernel Fail
56 | else
57 | save Lasso_001.mat CMat Fail %Too many outliers: save the raw coefficients only
58 | end
--------------------------------------------------------------------------------
/SSC_1.0/SparseCoefRecovery.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % This function takes the D x N matrix of N data points and writes every
3 | % point as a sparse linear combination of other points.
4 | % Xp: D x N matrix of N data points
5 | % cst: 1 if using the affine constraint sum(c)=1, else 0
6 | % Opt: type of optimization, {'L1Perfect','L1Noisy','Lasso','L1ED'}
7 | % lambda: regularization parameter of LASSO, typically between 0.001 and
8 | % 0.1 or the noise level for 'L1Noisy'
9 | % CMat: N x N matrix of coefficients, column i corresponds to the sparse
10 | % coefficients of data point in column i of Xp
11 | %--------------------------------------------------------------------------
12 | % Copyright @ Ehsan Elhamifar, 2010
13 | %--------------------------------------------------------------------------
14 |
15 |
16 | function CMat = SparseCoefRecovery(Xp,cst,Opt,lambda)
17 |
18 | if (nargin < 2)
19 | cst = 0;
20 | end
21 | if (nargin < 3)
22 | Opt = 'Lasso';
23 | end
24 | if (nargin < 4)
25 | lambda = 0.001;
26 | end
27 |
28 | D = size(Xp,1); % ambient dimension
29 | N = size(Xp,2); % number of data points
30 |
31 | for i = 1:N % solve one sparse-recovery problem per data point
32 |
33 | y = Xp(:,i); % the point to be reconstructed
34 | if i == 1
35 | Y = Xp(:,i+1:end); % dictionary: all points except the first
36 | elseif ( (i > 1) && (i < N) )
37 | Y = [Xp(:,1:i-1) Xp(:,i+1:N)]; % dictionary: all points except point i
38 | else
39 | Y = Xp(:,1:N-1); % dictionary: all points except the last
40 | end
41 |
42 | % L1 optimization using CVX
43 | if cst == 1 % with the affine constraint sum(c) = 1
44 | if ( strcmp(Opt , 'Lasso') )
45 | cvx_begin;
46 | cvx_precision high
47 | variable c(N-1,1);
48 | minimize( norm(c,1) + lambda * norm(Y * c - y) );
49 | subject to
50 | sum(c) == 1;
51 | cvx_end;
52 | elseif ( strcmp(Opt , 'L1Perfect') )
53 | cvx_begin;
54 | cvx_precision high
55 | variable c(N-1,1);
56 | minimize( norm(c,1) );
57 | subject to
58 | Y * c == y;
59 | sum(c) == 1;
60 | cvx_end;
61 | elseif ( strcmp(Opt , 'L1Noisy') )
62 | cvx_begin;
63 | cvx_precision high
64 | variable c(N-1,1);
65 | minimize( norm(c,1) );
66 | subject to
67 | norm( Y * c - y ) <= lambda; % lambda acts as the noise level here
68 | sum(c) == 1;
69 | cvx_end;
70 | elseif ( strcmp(Opt , 'L1ED') ) % L1ED: augment the dictionary with I to absorb sparse errors
71 | cvx_begin;
72 | cvx_precision high
73 | variable c(N-1+D,1);
74 | minimize( norm(c,1) );
75 | subject to
76 | [Y eye(D)] * c == y;
77 | sum(c(1:N-1)) == 1; % affine constraint on the data coefficients only
78 | cvx_end;
79 | end
80 | else % without the affine constraint
81 | if ( strcmp(Opt , 'Lasso') )
82 | cvx_begin;
83 | cvx_precision high
84 | variable c(N-1,1);
85 | minimize( norm(c,1) + lambda * norm(Y * c - y) );
86 | cvx_end;
87 | elseif ( strcmp(Opt , 'L1Perfect') )
88 | cvx_begin;
89 | cvx_precision high
90 | variable c(N-1,1);
91 | minimize( norm(c,1) );
92 | subject to
93 | Y * c == y;
94 | cvx_end;
95 | elseif ( strcmp(Opt , 'L1Noisy') )
96 | cvx_begin;
97 | cvx_precision high
98 | variable c(N-1,1);
99 | minimize( norm(c,1) );
100 | subject to
101 | norm( Y * c - y ) <= lambda; % lambda acts as the noise level here
102 | cvx_end;
103 | elseif ( strcmp(Opt , 'L1ED') ) % L1ED: augment the dictionary with I to absorb sparse errors
104 | cvx_begin;
105 | cvx_precision high
106 | variable c(N-1+D,1);
107 | minimize( norm(c,1) );
108 | subject to
109 | [Y eye(D)] * c == y;
110 | cvx_end;
111 | end
112 | end
113 |
114 | % place 0's in the diagonals of the coefficient matrix
115 | if i == 1
116 | CMat(1,1) = 0;
117 | CMat(2:N,1) = c(1:N-1);
118 | elseif ( (i > 1) && (i < N) )
119 | CMat(1:i-1,i) = c(1:i-1);
120 | CMat(i,i) = 0;
121 | CMat(i+1:N,i) = c(i:N-1); % shift coefficients past the excluded diagonal entry
122 | else
123 | CMat(1:N-1,N) = c(1:N-1);
124 | CMat(N,N) = 0;
125 | end
126 |
127 | end
--------------------------------------------------------------------------------
/SSC_1.0/SpectralClustering.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % This function takes a NxN matrix CMat as adjacency of a graph and
3 | % computes the segmentation of data from spectral clustering.
4 | % CMat: NxN adjacency matrix
5 | % n: number of groups for segmentation
6 | % (the truncation to K largest coefficients per column is applied beforehand in BuildAdjacency; K is not an input here)
7 | % Grps: [grp1,grp2,grp3] for three different forms of Spectral Clustering
8 | % SingVals: [SV1,SV2,SV3] singular values for three different forms of SC
9 | % LapKernel(:,:,i): n last columns of kernel of laplacian to apply KMeans
10 | %--------------------------------------------------------------------------
11 | % Copyright @ Ehsan Elhamifar, 2010
12 | %--------------------------------------------------------------------------
13 |
14 |
15 | function [Grps , SingVals, LapKernel] = SpectralClustering(CKSym,n)
16 |
17 | N = size(CKSym,1); % number of data points
18 | MAXiter = 1000; % Maximum iteration for KMeans Algorithm
19 | REPlic = 100; % Replication for KMeans Algorithm
20 |
21 | % Method 1: Unnormalized Method
22 | DKU = diag( sum(CKSym) ); % degree matrix
23 | LapKU = DKU - CKSym; % unnormalized Laplacian L = D - W
24 | [uKU,sKU,vKU] = svd(LapKU);
25 | f = size(vKU,2);
26 | kerKU = vKU(:,f-n+1:f); % singular vectors of the n smallest singular values (svd sorts descending)
27 | svalKU = diag(sKU);
28 | group1 = kmeans(kerKU,n,'start','sample','maxiter',MAXiter,'replicates',REPlic,'EmptyAction','singleton');
29 |
30 | % Method 2: Random Walk Method
31 | DKN=( diag( sum(CKSym) ) )^(-1); % inverse degree matrix D^(-1)
32 | LapKN = speye(N) - DKN * CKSym; % random-walk Laplacian I - D^(-1) W
33 | [uKN,sKN,vKN] = svd(LapKN);
34 | f = size(vKN,2);
35 | kerKN = vKN(:,f-n+1:f); % embedding from the n smallest singular values
36 | svalKN = diag(sKN);
37 | group2 = kmeans(kerKN,n,'start','sample','maxiter',MAXiter,'replicates',REPlic,'EmptyAction','singleton');
38 |
39 | % Method 3: Normalized Symmetric
40 | DKS = ( diag( sum(CKSym) ) )^(-1/2); % D^(-1/2)
41 | LapKS = speye(N) - DKS * CKSym * DKS; % symmetric normalized Laplacian I - D^(-1/2) W D^(-1/2)
42 | [uKS,sKS,vKS] = svd(LapKS);
43 | f = size(vKS,2);
44 | kerKS = vKS(:,f-n+1:f);
45 | for i = 1:N
46 | kerKS(i,:) = kerKS(i,:) ./ norm(kerKS(i,:)); % normalize each embedding row to unit length before KMeans
47 | end
48 | svalKS = diag(sKS);
49 | group3 = kmeans(kerKS,n,'start','sample','maxiter',MAXiter,'replicates',REPlic,'EmptyAction','singleton');
50 |
51 | % Collect the segmentations, spectra and embeddings of all three methods
52 | Grps = [group1,group2,group3];
53 | SingVals = [svalKU,svalKN,svalKS];
54 | LapKernel(:,:,1) = kerKU;
55 | LapKernel(:,:,2) = kerKN;
56 | LapKernel(:,:,3) = kerKS;
--------------------------------------------------------------------------------
/SSC_1.0/missclassGroups.m:
--------------------------------------------------------------------------------
1 | %--------------------------------------------------------------------------
2 | % [miss,index] = missclass(Segmentation,RefSegmentation,ngroups)
3 | % Computes the number of misclassified points in the vector Segmentation.
4 | % Segmentation: 1 by sum(npoints) or sum(ngroups) by 1 vector containing
5 | % the label for each group, ranging from 1 to n
6 | % RefSegmentation: reference (ground-truth) label vector of the same
7 | % length as Segmentation, with labels ranging from 1 to ngroups
8 | % ngroups: number of groups
9 | %--------------------------------------------------------------------------
10 | % Copyright @ Ehsan Elhamifar, 2010
11 | %--------------------------------------------------------------------------
12 |
13 |
14 | function [miss,index] = missclassGroups(Segmentation,RefSegmentation,ngroups)
15 |
16 | Permutations = perms(1:ngroups); % every possible relabeling of the groups
17 | if(size(Segmentation,2)==1)
18 | Segmentation=Segmentation'; % ensure Segmentation is a row vector
19 | end
20 | miss = zeros(size(Permutations,1),size(Segmentation,1));
21 | for k=1:size(Segmentation,1)
22 | for j=1:size(Permutations,1)
23 | miss(j,k) = sum(Segmentation(k,:)~=Permutations(j,RefSegmentation)); % errors when the ground truth is relabeled by permutation j
24 | end
25 | end
26 |
27 | [miss,temp] = min(miss,[],1); % pick the relabeling with the fewest errors
28 | index = Permutations(temp,:); % the best label permutation found
29 |
--------------------------------------------------------------------------------
/dsc.py:
--------------------------------------------------------------------------------
1 | from sp import getSparcityPrior
2 | from sp_blitzl1 import sparseCoefRecovery
3 | import tensorflow as tf
4 | import numpy as np
5 | from supporting_files.nncomponents import *
6 | from supporting_files.helpers import *
7 |
class DeepSubspaceClustering:
    """Deep auto-encoder whose middle-layer representation is regularized
    toward self-expressiveness (H ~ H*C) for subspace clustering.

    Cost = J1 (reconstruction RMSE)
         + lambda1 * J2 (self-expressiveness RMSE on the middle layer)
         + lambda2 * J3 (L2 penalty on all weights and biases).
    """

    def __init__(self, inputX, C=None, hidden_dims=[300,150,300], lambda1=0.01, lambda2=0.01, activation='tanh', \
                 weight_init='uniform', noise=None, learning_rate=0.1, optimizer='Adam'):
        """Build the graph.

        inputX: (n_sample, n_feat) data matrix (numpy array).
        C: optional precomputed (n_sample, n_sample) self-expressive
           coefficient matrix; recovered from inputX when None.
        hidden_dims: layer sizes of the encoder/decoder; must have ODD
           length so that, after the output layer is appended, the total
           depth M is even and there is a unique middle layer H^(M/2).
        noise: None, 'gaussian', or 'mask-<frac>' input corruption.
        """
        self.noise = noise
        n_sample, n_feat = inputX.shape

        # hidden_dims must have odd length (see docstring); the original
        # comment claimed "even", which contradicted the assert.
        assert len(hidden_dims) % 2 == 1

        # Append the output (reconstruction) layer on a COPY: the original
        # code mutated the caller's list -- and the shared mutable default --
        # so a second construction grew the list and tripped the assert.
        hidden_dims = list(hidden_dims) + [n_feat]

        # Raw data matrix (numpy); not a tensorflow symbol.
        self.inputX = inputX

        if C is None:
            # Transpose first: sparseCoefRecovery expects samples as columns.
            self.inputC = sparseCoefRecovery(inputX.T)
        else:
            self.inputC = C

        self.C = tf.placeholder(dtype=tf.float32, shape=[None, None], name='C')

        self.hidden_layers = []
        self.X = self._add_noise(tf.placeholder(dtype=tf.float32, shape=[None, n_feat], name='X'))

        input_hidden = self.X
        weights, biases = init_layer_weight(hidden_dims, inputX, weight_init)

        # J3: accumulate per-layer L2 penalties on weights and biases.
        J3_list = []
        for init_w, init_b in zip(weights, biases):
            self.hidden_layers.append(DenseLayer(input_hidden, init_w, init_b, activation=activation))
            input_hidden = self.hidden_layers[-1].output
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].w)))
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].b)))

        J3 = lambda2 * tf.add_n(J3_list)

        self.H_M = self.hidden_layers[-1].output
        # H(M/2): output of the middle layer.  Floor division keeps the index
        # an int on Python 3 (plain '/' yields a float there and raises).
        self.H_M_2 = self.hidden_layers[(len(hidden_dims)-1)//2].output

        # J1: reconstruction RMSE between (noisy) input and decoder output.
        J1 = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.X, self.H_M))))

        # J2: self-expressiveness RMSE || H' - H'C || on the middle layer.
        J2 = lambda1 * tf.sqrt(tf.reduce_mean(tf.square(tf.sub(tf.transpose(self.H_M_2), \
                    tf.matmul(tf.transpose(self.H_M_2), self.C)))))

        self.cost = J1 + J2 + J3

        self.optimizer = optimize(self.cost, learning_rate, optimizer)


    def train(self, batch_size=100, epochs=100, print_step=100):
        """Run mini-batch optimization.

        Stores the per-epoch loss (evaluated on the last batch of each
        epoch) in self.losses and the final middle-layer embedding of the
        last batch in self.result.
        """
        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        batch_generator = GenBatch(self.inputX, C=self.inputC, batch_size=batch_size)
        n_batch = batch_generator.n_batch

        self.losses = []
        for i in range(epochs):
            batch_generator.resetIndex()
            # NOTE(review): n_batch+1 iterations assumes GenBatch serves a
            # trailing partial batch -- confirm against GenBatch.get_batch.
            for j in range(n_batch+1):
                x_batch, c_batch = batch_generator.get_batch()
                sess.run(self.optimizer, feed_dict={self.X: x_batch, self.C: c_batch})

            self.losses.append(sess.run(self.cost, feed_dict={self.X: x_batch, self.C: c_batch}))

            if i % print_step == 0:
                print('epoch {0}: global loss = {1}'.format(i, self.losses[-1]))

        self.result = sess.run(self.H_M_2, feed_dict={self.X: x_batch, self.C: c_batch})


    def _add_noise(self, x):
        """Corrupt x according to self.noise.

        None        -> x unchanged.
        'gaussian'  -> additive N(0, 0.1) noise.
        'mask-<f>'  -> zero a random fraction f of each row.
        Unknown specs pass x through unchanged (previously they silently
        returned None).
        """
        if self.noise is None:
            return x
        if self.noise == 'gaussian':
            # NOTE(review): len(x) requires an indexable array, but the
            # constructor passes a tf placeholder here -- this branch would
            # fail on it; confirm the intended input type.
            n = np.random.normal(0, 0.1, (len(x), len(x[0])))
            return x + n
        if self.noise.startswith('mask'):
            # Bug fix: the original tested `self.noise == 'mask'`, which can
            # never match a 'mask-<frac>' spec (those fell through and
            # returned None) and crashed with IndexError on bare 'mask'.
            frac = float(self.noise.split('-')[1])
            temp = np.copy(x)
            for row in temp:
                # int(...) so np.random.choice gets an integer count.
                idx = np.random.choice(len(row), int(round(frac * len(row))), replace=False)
                row[idx] = 0
            return temp
        return x
--------------------------------------------------------------------------------
/dsc.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/dsc.pyc
--------------------------------------------------------------------------------
/l1benchmark/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/.DS_Store
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/.DS_Store
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveAMP.m:
--------------------------------------------------------------------------------
function [x_t, nIter, timeSteps, errorSteps, nShrinkage] = SolveAMP(A, b, varargin)

% Approximate Message Passing (AMP) solver for basis pursuit:
%   min_x ||x||_1 s.t. Ax = b
% Optional name/value inputs: 'stoppingCriterion', 'groundTruth',
% 'maxIteration', 'tolerance', 'maxTime' (others are accepted but ignored).
% Returns the final iterate x_t, the iteration count, per-iteration wall
% times and ground-truth errors, and the number of shrinkage applications.
t0 = tic;

DEBUG = 0 ;
DISPLAY = 0 ;

STOPPING_TIME = -2;
STOPPING_GROUND_TRUTH = -1;
STOPPING_DUALITY_GAP = 1;
STOPPING_SPARSE_SUPPORT = 2;
STOPPING_OBJECTIVE_VALUE = 3;
STOPPING_SUBGRADIENT = 4;
STOPPING_INCREMENTS = 5 ;
STOPPING_FEASIBILITY = 6 ;
STOPPING_DEFAULT = STOPPING_FEASIBILITY;

stoppingCriterion = STOPPING_DEFAULT;

% Initialize parameters
[m,n] = size(A) ;
if m >= n
    error('The observation matrix must have more columns than rows.') ;
end

tol = 1e-6 ;

% NOTE: shadows MATLAB's builtin eps on purpose; used only as the
% support-detection threshold below.
eps = 1e-6 ;

bNorm = norm(b) ;

maxIter = 10000 ;

% Parse the optional inputs.
if (mod(length(varargin), 2) ~= 0 ),
    error(['Extra Parameters passed to the function ''' mfilename ''' must be passed in pairs.']);
end
parameterCount = length(varargin)/2;

for parameterIndex = 1:parameterCount,
    parameterName = varargin{parameterIndex*2 - 1};
    parameterValue = varargin{parameterIndex*2};
    switch lower(parameterName)
        case 'stoppingcriterion'
            stoppingCriterion = parameterValue;
        case 'initialization'
            disp(['Ignoring ' parameterName ' for this method.']) ;
            % x0 = parameterValue;
            % if ~all(size(x0)==[n,1])
            %     error('The dimension of the initial x0 does not match.');
            % end
        case 'groundtruth'
            xG = parameterValue;
        case 'mu'
            disp(['Ignoring ' parameterName ' for this method.']) ;
        case 'gamma'
            disp(['Ignoring ' parameterName ' for this method.']) ;
        case 'maxiteration'
            maxIter = parameterValue;
        case 'isnonnegative'
            disp(['Ignoring ' parameterName ' for this method.']) ;
        case 'tolerance'
            tol = parameterValue;
        case 'verbose'
            verbose = parameterValue;
        case 'maxtime'
            maxTime = parameterValue;
        otherwise
            error(['The parameter ''' parameterName ''' is not recognized by the function ''' mfilename '''.']);
    end
end
clear varargin
timeSteps = nan(1,maxIter) ;
errorSteps = nan(1,maxIter) ;

converged = 0 ;

nIter = 0 ;
nShrinkage = 0 ;

delta = m/n ;

% Initial threshold; recomputed from the first residual correlation below.
tau_t = 0.1*norm(A'*b,inf) ;

deltaInv = n/m ;

x_t = zeros(n,1) ; z_t = b ;

f = norm(x_t,1) ;

nz_x = (abs(x_t)>eps*10);

while ~converged

    temp1 = A'*z_t ;

    if nIter == 0

        tau_t = 0.1*norm(temp1,inf) ;

    end

    temp2 = temp1 + x_t ;

    % AMP update: soft-threshold, then add the Onsager correction term to
    % the residual.
    x_tp1 = shrink(temp2, tau_t) ;

    residual = b - A*x_tp1 ;

    z_tp1 = residual + deltaInv*mean(shrink_der(temp2, tau_t))*z_t ;

    tau_tp1 = deltaInv*tau_t*mean(shrink_der(temp1 + x_tp1, tau_t)) ;

    nShrinkage = nShrinkage + 3 ;

    nIter = nIter + 1 ;

    timeSteps(nIter) = toc(t0) ;
    % Bug fix: only track the ground-truth error when 'groundTruth' was
    % supplied; the original referenced xG unconditionally and crashed
    % without it.  Entries stay NaN when no ground truth is given.
    if exist('xG','var')
        errorSteps(nIter) = norm(x_tp1-xG) ;
    end


    switch stoppingCriterion
        case STOPPING_GROUND_TRUTH
            if norm(xG-x_tp1) < tol
                converged = 1 ;
            end
        case STOPPING_SUBGRADIENT
            error('Vanishing subgradient is not a valid stopping criterion for AMP.');
        case STOPPING_SPARSE_SUPPORT
            % compute the stopping criterion based on the change
            % of the number of non-zero components of the estimate
            nz_x_prev = nz_x;
            nz_x = (abs(x_tp1)>eps*10);
            num_nz_x = sum(nz_x(:));
            num_changes_active = (sum(nz_x(:)~=nz_x_prev(:)));
            if num_nz_x >= 1
                criterionActiveSet = num_changes_active / num_nz_x;
                converged = ~(criterionActiveSet > tol);
            end
        case STOPPING_OBJECTIVE_VALUE
            % compute the stopping criterion based on the relative
            % variation of the objective function.
            prev_f = f;
            f = norm(x_tp1,1) ;
            criterionObjective = abs(f-prev_f)/(prev_f);
            converged = ~(criterionObjective > tol);
        case STOPPING_DUALITY_GAP
            error('Duality gap is not a valid stopping criterion for AMP.');
        case STOPPING_INCREMENTS
            if norm(x_tp1 - x_t) < tol*norm(x_t)
                converged = 1 ;
            end
        case STOPPING_FEASIBILITY
            if norm(residual) < tol*bNorm
                converged = 1 ;
            end
        case STOPPING_TIME
            converged = timeSteps(nIter) >= maxTime ;
        otherwise
            error('Undefined stopping criterion.');
    end

    if nIter >= maxIter && ~converged

        disp('Max. iterations reached.') ;
        converged = 1 ;

    end

    % if ~mod(nIter,2) && DISPLAY
    if DISPLAY > 1
        figure(100) ; clf ;
        % Bug fix: was stem(xtp1) -- an undefined variable.
        stem(x_tp1) ;
        title(['Iteration ' num2str(nIter)]) ;
        pause(0.1) ;

    end

    if DISPLAY > 0

        disp(['Iteration ' num2str(nIter) ' ||x||_0 ' num2str(sum(double(abs(x_tp1)>0)))]) ;

    end

    x_t = x_tp1 ;
    z_t = z_tp1 ;
    tau_t = tau_tp1 ;

end
timeSteps = timeSteps(1:nIter) ;
errorSteps = errorSteps(1:nIter) ;

% Soft-thresholding operator: sign(X) .* max(|X| - alpha, 0).
function Y = shrink(X, alpha)

Y = sign(X).*max(abs(X)-alpha,0) ;

% Derivative of the soft-threshold (indicator of the active set).
function Y = shrink_der(X, alpha)

Y = double(abs(X) > alpha) ;
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveDALM.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolveDALM.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveDALM_CBM.m:
--------------------------------------------------------------------------------
1 | function [x, e, nIter, timeSteps, errorSteps, idSteps] = SolveDALM_CBM(A, b, varargin)
2 |
3 | t0 = tic ;
4 |
5 | STOPPING_TIME = -2;
6 | STOPPING_GROUND_TRUTH = -1;
7 | STOPPING_DUALITY_GAP = 1;
8 | STOPPING_SPARSE_SUPPORT = 2;
9 | STOPPING_OBJECTIVE_VALUE = 3;
10 | STOPPING_SUBGRADIENT = 4;
11 | STOPPING_INCREMENTS = 5 ;
12 | STOPPING_DEFAULT = STOPPING_DUALITY_GAP;
13 |
14 | stoppingCriterion = STOPPING_DEFAULT;
15 |
16 | maxIter = 500;
17 | nu = 1 ;
18 | VERBOSE = 0 ;
19 | tol = 1e-5 ;
20 |
21 | % Parse the optional inputs.
22 | if (mod(length(varargin), 2) ~= 0 ),
23 | error(['Extra Parameters passed to the function ''' mfilename ''' lambdast be passed in pairs.']);
24 | end
25 | parameterCount = length(varargin)/2;
26 |
27 | for parameterIndex = 1:parameterCount,
28 | parameterName = varargin{parameterIndex*2 - 1};
29 | parameterValue = varargin{parameterIndex*2};
30 | switch lower(parameterName)
31 | case 'stoppingcriterion'
32 | stoppingCriterion = parameterValue;
33 | case 'groundtruth'
34 | xG = parameterValue;
35 | case 'tolerance'
36 | tol = parameterValue;
37 | case 'maxiteration'
38 | maxIter = parameterValue;
39 | case 'nu'
40 | nu = parameterValue ;
41 | case 'maxtime'
42 | maxTime = parameterValue;
43 | case 'recdata'
44 | recData = parameterValue;
45 | otherwise
46 | error(['The parameter ''' parameterName ''' is not recognized by the function ''' mfilename '''.']);
47 | end
48 | end
49 | clear varargin
50 |
51 | % Initialize parameters
52 | [m,n] = size(A) ;
53 |
54 | Av = [A nu * eye(m)] / sqrt(1 + nu^2);
55 | bv = 1 / sqrt(1+nu^2) * b;
56 |
57 | G = (A * A' + nu^2 * eye(m))/ (1+nu^2);
58 | invG_Av = G \ Av;
59 | invG_bv = G \ bv;
60 |
61 | beta = norm(bv,1)/m;
62 | betaInv = 1/beta ;
63 |
64 | nIter = 0 ;
65 |
66 | if VERBOSE
67 | disp(['beta is: ' num2str(beta)]);
68 | end
69 |
70 | y = zeros(m,1);
71 | x = zeros(n,1); e = zeros(m,1);
72 | xv = [x; e];
73 | z = zeros(m+n,1);
74 |
75 | converged_main = 0 ;
76 |
77 | timeSteps = nan(1,maxIter) ;
78 | errorSteps = nan(1,maxIter) ;
79 | idSteps = nan(1,maxIter);
80 |
81 | temp = Av' * y;
82 | while ~converged_main
83 |
84 | nIter = nIter + 1 ;
85 |
86 | x_old = x;
87 | e_old = e;
88 |
89 | %update z
90 | temp1 = temp + xv * betaInv;
91 | z = sign(temp1) .* min(1,abs(temp1));
92 |
93 | %compute Av' * y
94 | y = invG_Av * (z - xv * betaInv) + invG_bv * betaInv;
95 | %temp = Av_invG_Av * (z - xv * betaInv) + Av_invG_bv * betaInv;
96 | %temp = Av' * (invG * (Av * (z - xv * betaInv))) + Av_invG_bv *
97 | %betaInv;
98 | temp = Av' * y;
99 |
100 | %update x_v
101 | xv = xv - beta * (z - temp);
102 |
103 | x = xv(1:n);
104 | e = xv(n+1:end);
105 |
106 | if VERBOSE && mod(nIter, 50) == 0
107 |
108 | disp(['Iteration ' num2str(nIter)]) ;
109 | disp([norm(x-x_old)/norm(x_old) norm(e-e_old)/norm(e_old)]);
110 |
111 | figure(1);
112 | subplot(3,1,1);
113 | plot(x);
114 | title('x');
115 | subplot(3,1,2);
116 | plot(e);
117 | title('e');
118 | subplot(3,1,3);
119 | plot(z);
120 | title('z');
121 | pause;
122 |
123 | end
124 |
125 | timeSteps(nIter) = toc(t0) ;
126 | errorSteps(nIter) = norm(x-xG) ;
127 | sci = zeros(1,recData.nSubj);
128 | norm_xk = norm(x,1);
129 | for i = 1:recData.nSubj
130 | sci(i) = (recData.nSubj * norm(x(((i-1)*recData.nImg+1):i*recData.nImg), 1)/norm_xk - 1) ...
131 | / (recData.nSubj - 1);
132 | end
133 | [dontCare, curId] = max(sci);
134 | idSteps(nIter) = (curId == recData.id);
135 |
136 | switch stoppingCriterion
137 | case STOPPING_GROUND_TRUTH
138 | if norm(xG-x) < tol
139 | converged_main = 1 ;
140 | end
141 | case STOPPING_SUBGRADIENT
142 | error('Duality gap is not a valid stopping criterion for ALM.');
143 | case STOPPING_SPARSE_SUPPORT
144 | % compute the stopping criterion based on the change
145 | % of the number of non-zero components of the estimate
146 | nz_x_prev = nz_x;
147 | nz_x = (abs([x; e])>eps*10);
148 | num_nz_x = sum(nz_x(:));
149 | num_changes_active = (sum(nz_x(:)~=nz_x_prev(:)));
150 | if num_nz_x >= 1
151 | criterionActiveSet = num_changes_active / num_nz_x;
152 | converged_main = ~(criterionActiveSet > tol);
153 | end
154 | case STOPPING_OBJECTIVE_VALUE
155 | % compute the stopping criterion based on the relative
156 | % variation of the objective function.
157 | prev_f = f;
158 | f = norm([x ; e],1);
159 | criterionObjective = abs(f-prev_f)/(prev_f);
160 | converged_main = ~(criterionObjective > tol);
161 | case STOPPING_DUALITY_GAP
162 | if abs(norm(xv,1)- y.'*b)= maxTime ;
171 | otherwise
172 | error('Undefined stopping criterion.');
173 | end
174 |
175 | if ~converged_main && nIter >= maxIter
176 | disp('Maximum Iterations Reached') ;
177 | converged_main = 1 ;
178 | end
179 |
180 | end
181 |
182 | x = xv(1:n);
183 | e = xv(n+1:end);
184 |
185 | timeSteps = timeSteps(1:nIter) ;
186 | errorSteps = errorSteps(1:nIter) ;
187 | idSteps = idSteps(1:nIter);
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveFISTA.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolveFISTA.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveFISTA_CBM.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolveFISTA_CBM.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveL1LS.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolveL1LS.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveOMP.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolveOMP.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveOMP_CBM.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolveOMP_CBM.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolvePALM.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolvePALM.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolvePALM_CBM.m:
--------------------------------------------------------------------------------
1 | function [x, e, nIter, timeSteps, errorSteps, idSteps] = SolvePALM_CBM(A, b, varargin)
2 |
3 | t0 = tic ;
4 | DEBUG = 0 ;
5 |
6 | STOPPING_TIME = -2;
7 | STOPPING_GROUND_TRUTH = -1;
8 | STOPPING_DUALITY_GAP = 1;
9 | STOPPING_SPARSE_SUPPORT = 2;
10 | STOPPING_OBJECTIVE_VALUE = 3;
11 | STOPPING_SUBGRADIENT = 4;
12 | STOPPING_INCREMENTS = 5 ;
13 | STOPPING_DEFAULT = STOPPING_DUALITY_GAP;
14 |
15 | stoppingCriterion = STOPPING_DEFAULT;
16 |
17 | % Initialize parameters
18 | [m,n] = size(A) ;
19 |
20 | tol = 5e-2 ;
21 | tol_int = 1e-6 ;
22 |
23 | G = A'*A ;
24 | opts.disp = 0;
25 | tau = eigs(G,1,'lm',opts);
26 | tauInv = 1/tau ;
27 |
28 | nIter = 0 ;
29 |
30 | mu = 2 *m / norm(b,1);
31 |
32 | lambda = zeros(m,1);
33 | x = zeros(n,1) ;
34 | e = b ;
35 |
36 | converged_main = 0 ;
37 |
38 | maxIter = 200 ;
39 | maxIter_apg = 400;
40 |
41 | nz_x = (abs([x; e])>eps*10);
42 | f = norm([x;e],1) ;
43 |
44 | % Parse the optional inputs.
45 | if (mod(length(varargin), 2) ~= 0 ),
46 | error(['Extra Parameters passed to the function ''' mfilename ''' lambdast be passed in pairs.']);
47 | end
48 | parameterCount = length(varargin)/2;
49 |
50 | for parameterIndex = 1:parameterCount,
51 | parameterName = varargin{parameterIndex*2 - 1};
52 | parameterValue = varargin{parameterIndex*2};
53 | switch lower(parameterName)
54 | case 'stoppingcriterion'
55 | stoppingCriterion = parameterValue;
56 | case 'groundtruth'
57 | xG = parameterValue;
58 | case 'tolerance'
59 | tol = parameterValue;
60 | case 'maxiteration'
61 | maxIter = parameterValue;
62 | case 'maxtime'
63 | maxTime = parameterValue;
64 | case 'recdata'
65 | recData = parameterValue;
66 | otherwise
67 | error(['The parameter ''' parameterName ''' is not recognized by the function ''' mfilename '''.']);
68 | end
69 | end
70 | clear varargin
71 |
72 | timeSteps = nan(1,maxIter) ;
73 | errorSteps = nan(1,maxIter) ;
74 | idSteps = nan(1,maxIter) ;
75 |
76 | while ~converged_main
77 |
78 | muInv = 1/mu ;
79 | lambdaScaled = muInv*lambda ;
80 |
81 | nIter = nIter + 1 ;
82 |
83 | e_old_main = e ;
84 | x_old_main = x ;
85 |
86 | temp2 = b + lambdaScaled ;
87 | temp = temp2 - A*x ;
88 |
89 | e = shrink(temp,muInv) ;
90 |
91 | converged_apg = 0 ;
92 |
93 | temp1 = A'*(e - temp2) ;
94 |
95 | nIter_apg = 0 ;
96 |
97 | t1 = 1 ; z = x ;
98 |
99 | muTauInv = muInv*tauInv ;
100 |
101 | Gx = G * x;
102 | Gz = Gx;
103 |
104 | while ~converged_apg
105 |
106 | nIter_apg = nIter_apg + 1 ;
107 |
108 | x_old_apg = x ;
109 | Gx_old = Gx;
110 |
111 | temp = z - tauInv*(temp1 + Gz) ;
112 |
113 | %x = shrink(temp, muTauInv) ;
114 | x = sign(temp) .* max(abs(temp)-muTauInv, 0);
115 |
116 | Gx = G * x;
117 |
118 | s = tau * (z - x) + Gx - Gz;
119 | if norm(s) < tol_int * tau * max(1,norm(x))
120 | converged_apg = 1;
121 | end
122 |
123 | if nIter_apg >= maxIter_apg
124 | converged_apg = 1 ;
125 | end
126 |
127 | t2 = (1+sqrt(1+4*t1*t1))/2 ;
128 | z = x + ((t1-1)/t2)*(x-x_old_apg) ;
129 | Gz = Gx + ((t1-1)/t2) * (Gx - Gx_old);
130 | t1 = t2 ;
131 |
132 | end
133 |
134 | lambda = lambda + mu*(b - A*x - e) ;
135 |
136 | timeSteps(nIter) = toc(t0) ;
137 | errorSteps(nIter) = norm(x-xG) ;
138 | sci = zeros(1,recData.nSubj);
139 | norm_xk = norm(x,1);
140 | for i = 1:recData.nSubj
141 | sci(i) = (recData.nSubj * norm(x(((i-1)*recData.nImg+1):i*recData.nImg), 1)/norm_xk - 1) ...
142 | / (recData.nSubj - 1);
143 | end
144 | [dontCare, curId] = max(sci);
145 | idSteps(nIter) = (curId == recData.id);
146 |
147 | switch stoppingCriterion
148 | case STOPPING_GROUND_TRUTH
149 | if norm(xG-x) < tol
150 | converged_main = 1 ;
151 | end
152 | case STOPPING_SUBGRADIENT
153 | error('Duality gap is not a valid stopping criterion for ALM.');
154 | case STOPPING_SPARSE_SUPPORT
155 | % compute the stopping criterion based on the change
156 | % of the number of non-zero components of the estimate
157 | nz_x_prev = nz_x;
158 | nz_x = (abs([x; e])>eps*10);
159 | num_nz_x = sum(nz_x(:));
160 | num_changes_active = (sum(nz_x(:)~=nz_x_prev(:)));
161 | if num_nz_x >= 1
162 | criterionActiveSet = num_changes_active / num_nz_x;
163 | converged_main = ~(criterionActiveSet > tol);
164 | end
165 | case STOPPING_OBJECTIVE_VALUE
166 | % compute the stopping criterion based on the relative
167 | % variation of the objective function.
168 | prev_f = f;
169 | f = norm([x ; e],1);
170 | criterionObjective = abs(f-prev_f)/(prev_f);
171 | converged_main = ~(criterionObjective > tol);
172 | case STOPPING_DUALITY_GAP
173 | if abs(norm(x,1)- lambda.'*b)= maxTime ;
182 | otherwise
183 | error('Undefined stopping criterion.');
184 | end
185 |
186 | % if ~converged_main && norm(x_old_main-x) < 100*eps
187 | % if DEBUG
188 | % disp('The iteration is stuck.') ;
189 | % end
190 | % converged_main = 1 ;
191 | %
192 | % end
193 |
194 |
195 | if ~converged_main && nIter >= maxIter
196 | if DEBUG
197 | disp('Maximum Iterations Reached') ;
198 | end
199 | converged_main = 1 ;
200 |
201 | end
202 |
203 | end
204 | timeSteps = timeSteps(1:nIter) ;
205 | errorSteps = errorSteps(1:nIter) ;
206 |
207 | function Y = shrink(X, alpha)
208 |
209 | Y = sign(X).*max(abs(X)-alpha,0);
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolvePDIPA.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolvePDIPA.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolvePDIPA_CBM_std.m:
--------------------------------------------------------------------------------
1 | % The following primal-dual interior-point algorithm is modified from l1eq_pd.m
2 | %
3 | % Solve
4 | % min_x ||x||_1 s.t. Ax = b
5 | %
6 | % Recast as linear program
7 | % min_{x,u} sum(u) s.t. -u <= x <= u, Ax=b
8 | % and use primal-dual interior point method
9 | %
10 | % Usage: xp = l1eq_pd(x0, A, At, b, pdtol, pdmaxiter, cgtol, cgmaxiter)
11 | %
12 | % x0 - Nx1 vector, initial point.
13 | %
14 | % A - A KxN matrix.
15 | %
16 | %
17 | % b - Kx1 vector of observations.
18 | %
19 | % pdtol - Tolerance for primal-dual algorithm (algorithm terminates if
20 | % the duality gap is less than pdtol).
21 | % Default = 1e-3.
22 | %
23 | % pdmaxiter - Maximum number of primal-dual iterations.
24 | % Default = 50.
25 | %
26 | %
27 | % Written by: Justin Romberg, Caltech
28 | % Email: jrom@acm.caltech.edu
29 | % Created: October 2005
30 | %
31 |
32 | function [x_out, e_out, pditer] = SolvePDIPA_CBM(A, b, varargin)
33 |
34 | DEBUG = 1;
35 |
36 | STOPPING_GROUND_TRUTH = -1;
37 | STOPPING_DUALITY_GAP = 1;
38 | STOPPING_SPARSE_SUPPORT = 2;
39 | STOPPING_OBJECTIVE_VALUE = 3;
40 | STOPPING_SUBGRADIENT = 4;
41 | STOPPING_DEFAULT = STOPPING_DUALITY_GAP;
42 |
43 | stoppingCriterion = STOPPING_DEFAULT;
44 |
45 | tolerance = 1e-3;
46 | pdmaxiter = 50;
47 | [K, N] = size(A);
48 | n = K+N;
49 | x0 = zeros(n,1);
50 | At = A';
51 |
52 | % Parse the optional inputs.
53 | if (mod(length(varargin), 2) ~= 0 ),
54 | error(['Extra Parameters passed to the function ''' mfilename ''' must be passed in pairs.']);
55 | end
56 | parameterCount = length(varargin)/2;
57 |
58 | for parameterIndex = 1:parameterCount,
59 | parameterName = varargin{parameterIndex*2 - 1};
60 | parameterValue = varargin{parameterIndex*2};
61 | switch lower(parameterName)
62 | case 'stoppingcriterion'
63 | stoppingCriterion = parameterValue;
64 | case 'initialization'
65 | x0 = parameterValue;
66 | case 'groundtruth'
67 | xG = parameterValue;
68 | case 'lambda'
69 | lambda = parameterValue;
70 | case 'maxiteration'
71 | pdmaxiter = parameterValue;
72 | case 'isnonnegative'
73 | isNonnegative = parameterValue;
74 | case 'tolerance'
75 | tolerance = parameterValue;
76 | case 'verbose'
77 | verbose = parameterValue;
78 | otherwise
79 | error(['The parameter ''' parameterName ''' is not recognized by the function ''' mfilename '''.']);
80 | end
81 | end
82 | clear varargin
83 |
84 | alpha = 0.01;
85 | beta = 0.5;
86 | mu = 10;
87 |
88 | gradf0 = [zeros(n,1); ones(n,1)];
89 |
90 | x = x0;
91 | u = 1.01*max(abs(x))*ones(n,1) + 1e-2;
92 |
93 | fu1 = x - u;
94 | fu2 = -x - u;
95 |
96 | lamu1 = -1./fu1;
97 | lamu2 = -1./fu2;
98 | v = -A*(lamu1(1:N)-lamu2(1:N)) - (lamu1(N+1:end)-lamu2(N+1:end));
99 | Atv = [At*v; v];
100 | rpri = A*x(1:N) + x(N+1:end) - b;
101 |
102 | sdg = -(fu1'*lamu1 + fu2'*lamu2);
103 | tau = mu*2*N/sdg;
104 |
105 | rcent = [-lamu1.*fu1; -lamu2.*fu2] - (1/tau);
106 | rdual = gradf0 + [lamu1-lamu2; -lamu1-lamu2] + [Atv; zeros(n,1)];
107 | resnorm = norm([rdual; rcent; rpri]);
108 |
109 | pditer = 0;
110 | while (pditer < pdmaxiter)
111 |
112 | pditer = pditer + 1;
113 |
114 | w1 = -1/tau*(-1./fu1 + 1./fu2) - Atv;
115 | w2 = -1 - 1/tau*(1./fu1 + 1./fu2);
116 | w3 = -rpri;
117 |
118 | sig1 = -lamu1./fu1 - lamu2./fu2;
119 | sig2 = lamu1./fu1 - lamu2./fu2;
120 | sigx = sig1 - sig2.^2./sig1;
121 |
122 | if any(sigx==0)
123 | sigx = sigx + 100*eps;
124 | end
125 |
126 | H11p = -A*diag(1./sigx(1:N))*At-diag(1./sigx(N+1:end));
127 | w1p = (w1./sigx - w2.*sig2./(sigx.*sig1));
128 | w1p = w3 - A*w1p(1:N) - w1p(N+1:end);
129 | [dv,hcond] = linsolve(H11p,w1p);
130 | if (hcond < 1e-14)
131 | if DEBUG>0
132 | disp('Primal-dual: Matrix ill-conditioned. Returning previous iterate.');
133 | end
134 | x_out = x(1:N);
135 | e_out = x(N+1:end);
136 | return
137 | end
138 | dx = (w1 - w2.*sig2./sig1 - [At*dv; dv])./sigx;
139 | Adx = A*dx(1:N)+dx(N+1:end);
140 | Atdv = [At*dv; dv];
141 |
142 | du = (w2 - sig2.*dx)./sig1;
143 |
144 | dlamu1 = (lamu1./fu1).*(-dx+du) - lamu1 - (1/tau)*1./fu1;
145 | dlamu2 = (lamu2./fu2).*(dx+du) - lamu2 - 1/tau*1./fu2;
146 |
147 | % make sure that the step is feasible: keeps lamu1,lamu2 > 0, fu1,fu2 < 0
148 | indp = find(dlamu1 < 0); indn = find(dlamu2 < 0);
149 | s = min([1; -lamu1(indp)./dlamu1(indp); -lamu2(indn)./dlamu2(indn)]);
150 | indp = find((dx-du) > 0); indn = find((-dx-du) > 0);
151 | s = (0.99)*min([s; -fu1(indp)./(dx(indp)-du(indp)); -fu2(indn)./(-dx(indn)-du(indn))]);
152 |
153 | % backtracking line search
154 | backiter = 0;
155 | xp = x + s*dx; up = u + s*du;
156 | vp = v + s*dv; Atvp = Atv + s*Atdv;
157 | lamu1p = lamu1 + s*dlamu1; lamu2p = lamu2 + s*dlamu2;
158 | fu1p = xp - up; fu2p = -xp - up;
159 | rdp = gradf0 + [lamu1p-lamu2p; -lamu1p-lamu2p] + [Atvp; zeros(n,1)];
160 | rcp = [-lamu1p.*fu1p; -lamu2p.*fu2p] - (1/tau);
161 | rpp = rpri + s*Adx;
162 | while(norm([rdp; rcp; rpp]) > (1-alpha*s)*resnorm)
163 | s = beta*s;
164 | xp = x + s*dx; up = u + s*du;
165 | vp = v + s*dv; Atvp = Atv + s*Atdv;
166 | lamu1p = lamu1 + s*dlamu1; lamu2p = lamu2 + s*dlamu2;
167 | fu1p = xp - up; fu2p = -xp - up;
168 | rdp = gradf0 + [lamu1p-lamu2p; -lamu1p-lamu2p] + [Atvp; zeros(n,1)];
169 | rcp = [-lamu1p.*fu1p; -lamu2p.*fu2p] - (1/tau);
170 | rpp = rpri + s*Adx;
171 | backiter = backiter+1;
172 | if (backiter > 32)
173 | if DEBUG>0
174 | disp('Stuck backtracking, returning last iterate.')
175 | end
176 | x_out = x(1:N);
177 | e_out = x(N+1:end);
178 | return
179 | end
180 | end
181 |
182 | v = vp; Atv = Atvp;
183 | lamu1 = lamu1p; lamu2 = lamu2p;
184 | fu1 = fu1p; fu2 = fu2p;
185 |
186 | % surrogate duality gap
187 | sdg = -(fu1'*lamu1 + fu2'*lamu2);
188 | tau = mu*2*n/sdg;
189 | rpri = rpp;
190 | rcent = [-lamu1.*fu1; -lamu2.*fu2] - (1/tau);
191 | rdual = gradf0 + [lamu1-lamu2; -lamu1-lamu2] + [Atv; zeros(n,1)];
192 | resnorm = norm([rdual; rcent; rpri]);
193 |
194 | switch stoppingCriterion
195 | case STOPPING_GROUND_TRUTH
196 | done = norm(xp(1:N)-xG) maxTime
122 | break;
123 | end
124 | c_init=c;
125 | % disp(['relative error: ' num2str(norm(c00-c)/norm(c00))]);
126 |
127 | end
128 | x = c;
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveSesopPCD_CBM.m:
--------------------------------------------------------------------------------
function [x,e, nIter, timeSteps, errorSteps, idSteps] = SolveSesopPCD_CBM(A, b, varargin)
% Cross-and-bouquet variant of the SESOP-PCD l1 solver.
% Recovers a sparse signal x and a dense error term e with A*x + e ~ b by
% minimising a smoothed-l1 penalty over the stacked variable c = [x; e],
% using continuation: the penalty weight and smoothing parameter are
% shrunk by 10x on each of six outer stages.
%
% Outputs:
%   x, e        - signal and error parts of the stacked solution c
%   nIter       - total inner iterations accumulated over all stages
%   timeSteps, errorSteps, idSteps - per-iteration traces from sesoptn_t
%
% NOTE(review): 'groundtruth' (xG), 'recdata' (recData) and 'maxtime'
% (maxTime) are used unconditionally below, so callers must supply them.

STOPPING_TIME = -2;
STOPPING_GROUND_TRUTH = -1;
STOPPING_DUALITY_GAP = 1;
STOPPING_SPARSE_SUPPORT = 2;
STOPPING_OBJECTIVE_VALUE = 3;
STOPPING_SUBGRADIENT = 4;
STOPPING_INCREMENTS = 5 ;
STOPPING_DEFAULT = STOPPING_DUALITY_GAP;

% NOTE(review): stoppingCriterion is parsed but never consulted; the run
% is bounded only by maxTime and the stage/iteration limits.
stoppingCriterion = STOPPING_DEFAULT;

% Parse the optional inputs.
if (mod(length(varargin), 2) ~= 0 ),
    error(['Extra Parameters passed to the function ''' mfilename ''' must be passed in pairs.']);
end
parameterCount = length(varargin)/2;

for parameterIndex = 1:parameterCount,
    parameterName = varargin{parameterIndex*2 - 1};
    parameterValue = varargin{parameterIndex*2};
    switch lower(parameterName)
        case 'stoppingcriterion'
            stoppingCriterion = parameterValue;
        case 'groundtruth'
            xG = parameterValue;
        case 'tolerance'
            tol = parameterValue;
        case 'maxiteration'
            maxIter = parameterValue;
        case 'maxtime'
            maxTime = parameterValue;
        case 'recdata'
            recData = parameterValue;
        otherwise
            error(['The parameter ''' parameterName ''' is not recognized by the function ''' mfilename '''.']);
    end
end
clear varargin

timeSteps = [] ;
errorSteps = [] ;
idSteps = [];

t0 = tic;

options=sesoptn_optionset; % Get default options structure (see comments in optionset_sesoptn.m)
options.max_sesop_iter = 1e5; % Max SESOP iterations
options.max_newton_iter=1;    % Max Newton iterations in subspace optimization (one can play with this)
options.max_iter_CGinTN=0;    % Conj.Grad steps in Truncated Newton (if set to 0, TN not used)

options.precond=1;  % 1 - use user defined preconditioning, 0 - don't use preconditioning

FlagPCD=1;  % when options.precond=1:
            % 1 - PCD (parallel coord. descent)
            % 0 - diagonal precond.

options.nLastSteps=1;
options.sesop_figure_name=sprintf('SESOPtn %d CG steps per TN iter; FlagPCD=%d',options.max_iter_CGinTN, FlagPCD);

options.ShowSesopPlots = 0 ;

par.weight_abs_penalty= 1e-0; % Weight of smooth abs penalty (mu)
par.eps_smooth_abs=1e-0;      % Smoothing parameter of absolute value approximation

% Operators for the augmented matrix B = [A, I] acting on c = [x; e].
par.multA= @(x,par)  multMatr_zhou(A,x);     % user function y=Ax
par.multAt=@(x,par)  multMatrAdj_zhou(A,x);  % user function y=A'*x

[n,k] = size(A);

c_init = zeros(n+k,1);   % Starting point for optimization
par.y=b;

% Compute diag_AtA to be used for preconditioning
% diag_AtA=StochasticCalcDiagAtA(par.multAt,size(par.y),20,par); % For large matrices
diag_AtA=[diag(A'*A); ones(n,1)];  % For small matrices (identity block contributes ones)


par.func_u =@ls_fgh;          % user function f(u) = 0.5*||u - y||^2
par.func_x=@sum_abs_smooth;   % user function f(x) = mu*sum(abs_smoothed_eps(x))


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  User preconditioning function:  d_new=mult_precond (-g, x, par)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


if FlagPCD    % PCD direction

    options.mult_precond =@(g, x, Ax, InsideCGforTN, par) mult_precond_pcd(g, x, Ax, InsideCGforTN, par,diag_AtA);

else          % Diagonal preconditioning

    options.mult_precond = @(g, x, Ax, InsideCGforTN, par) mult_diag_precond(g, x, Ax, InsideCGforTN, par,diag_AtA);

end




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%   Perform SESOP optimization
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

nIter = 0;
for i=1:6
    % Continuation: tighten the penalty and the smoothing by 10x per stage.
    par.weight_abs_penalty = 1e-1 *par.weight_abs_penalty; % update weight of smooth abs penalty (mu)
    par.eps_smooth_abs     = 1e-1 *par.eps_smooth_abs;     % update smoothing parameter of absolute value approximation

    [c, diff_x, temp_t, temp_id] = sesoptn_t(c_init, xG, recData, t0, par.func_u, par.func_x, par.multA, par.multAt,options,par);
    timeSteps = [timeSteps temp_t];
    errorSteps = [errorSteps diff_x] ;
    idSteps = [idSteps temp_id];
    nIter = nIter + length(diff_x);
    if timeSteps(nIter) > maxTime
        break;
    end
    c_init=c;  % warm-start the next continuation stage
    % disp(['relative error: ' num2str(norm(c00-c)/norm(c00))]);
end

% Split the stacked solution back into signal and error parts.
x = c(1:k);
e = c(k+1:end);
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveSpaRSA.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/l1benchmark/L1Solvers/SolveSpaRSA.m
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveTFOCS.m:
--------------------------------------------------------------------------------
% Copyright 2011. The Regents of the University of California (Regents).
% All Rights Reserved. Contact The Office of Technology Licensing,
% UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620,
% (510) 643-7201, for commercial licensing opportunities.

% Authors: Arvind Ganesh, Allen Y. Yang, Zihan Zhou.
% Contact: Allen Y. Yang, Department of EECS, University of California,
% Berkeley. <yang@eecs.berkeley.edu>

% IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
% SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
% ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
% REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

% REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
% TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
% PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
% PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO
% PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

function [x, nIter, timeSteps, errorSteps] = SolveTFOCS(A, b, varargin)

% Solve the basis-pursuit problem
%   min_x ||x||_1   s.t.  Ax = b
% by delegating to the TFOCS smoothed conic dual solver (tfocs_SCD).
%
% Outputs: x - recovered signal; nIter - iterations used by TFOCS;
%          timeSteps/errorSteps - per-iteration timing and error traces.
%
% NOTE(review): 'groundtruth' (xG) and 'maxtime' (maxTime) are used
% unconditionally below, so callers must supply them.
t0 = tic;

STOPPING_TIME = -2;
STOPPING_GROUND_TRUTH = -1;
STOPPING_DUALITY_GAP = 1;
STOPPING_SPARSE_SUPPORT = 2;
STOPPING_OBJECTIVE_VALUE = 3;
STOPPING_SUBGRADIENT = 4;
STOPPING_INCREMENTS = 5 ;
STOPPING_DEFAULT = STOPPING_INCREMENTS;

stoppingCriterion = STOPPING_DEFAULT;

% Signal length; required by the 'initialization' dimension check below.
% (Fix: n was previously referenced without ever being defined.)
n = size(A, 2);

% Parse the optional inputs.
if (mod(length(varargin), 2) ~= 0 ),
    error(['Extra Parameters passed to the function ''' mfilename ''' must be passed in pairs.']);
end
parameterCount = length(varargin)/2;

for parameterIndex = 1:parameterCount,
    parameterName = varargin{parameterIndex*2 - 1};
    parameterValue = varargin{parameterIndex*2};
    switch lower(parameterName)
        case 'stoppingcriterion'
            stoppingCriterion = parameterValue;
        case 'initialization'
            x0 = parameterValue;
            if ~all(size(x0)==[n,1])
                error('The dimension of the initial x0 does not match.');
            end
        case 'groundtruth'
            xG = parameterValue;
        case 'mu'
            mu = parameterValue;
        case 'gamma'
            gamma = parameterValue;
        case 'maxiteration'
            maxIterOuter = parameterValue;
        case 'isnonnegative'
            isNonnegative = parameterValue;
        case 'tolerance'
            tol = parameterValue;
        case 'verbose'
            verbose = parameterValue;
        case 'maxtime'
            maxTime = parameterValue;
        otherwise
            error(['The parameter ''' parameterName ''' is not recognized by the function ''' mfilename '''.']);
    end
end
clear varargin

% NOTE(review): this deliberately overrides any user-supplied 'mu' with the
% benchmark's fixed smoothing parameter.
mu = .01;
opts.tol = 1e-10;
opts.xG = xG;
opts.maxTime = maxTime;
opts.t0 = t0;

% TFOCS smoothed conic dual solver on the l1/affine formulation.
[x, odata, opts] = tfocs_SCD( prox_l1, { A, -b }, prox_l2(1e-6), mu, [], [], opts );

timeSteps = odata.timeSteps;
errorSteps = odata.errorSteps;
nIter = odata.niter;
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/SolveTFOCS_CBM.m:
--------------------------------------------------------------------------------
% Copyright 2011. The Regents of the University of California (Regents).
% All Rights Reserved. Contact The Office of Technology Licensing,
% UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620,
% (510) 643-7201, for commercial licensing opportunities.

% Authors: Arvind Ganesh, Allen Y. Yang, Zihan Zhou.
% Contact: Allen Y. Yang, Department of EECS, University of California,
% Berkeley. <yang@eecs.berkeley.edu>

% IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
% SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
% ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
% REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

% REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
% TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
% PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
% PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO
% PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

function [x_out, e_out, nIter, timeSteps, errorSteps, idSteps] = SolveTFOCS_CBM(A, b, varargin)

% Cross-and-bouquet variant: solve
%   min_{x,e} ||[x;e]||_1   s.t.  A*x + e = b
% by running TFOCS on the augmented matrix B = [A, I].
%
% Outputs: x_out/e_out - signal and error parts of the stacked solution;
%          nIter, timeSteps, errorSteps, idSteps - TFOCS run statistics.
%
% NOTE(review): 'groundtruth' (xG), 'maxtime' (maxTime) and 'recdata'
% (recData) are used unconditionally below, so callers must supply them.
t0 = tic;

STOPPING_TIME = -2;
STOPPING_GROUND_TRUTH = -1;
STOPPING_DUALITY_GAP = 1;
STOPPING_SPARSE_SUPPORT = 2;
STOPPING_OBJECTIVE_VALUE = 3;
STOPPING_SUBGRADIENT = 4;
STOPPING_INCREMENTS = 5 ;
STOPPING_DEFAULT = STOPPING_INCREMENTS;

stoppingCriterion = STOPPING_DEFAULT;

% Signal length; required by the 'initialization' dimension check below.
% (Fix: n was previously referenced without ever being defined.)
n = size(A, 2);

% Parse the optional inputs.
if (mod(length(varargin), 2) ~= 0 ),
    error(['Extra Parameters passed to the function ''' mfilename ''' must be passed in pairs.']);
end
parameterCount = length(varargin)/2;

for parameterIndex = 1:parameterCount,
    parameterName = varargin{parameterIndex*2 - 1};
    parameterValue = varargin{parameterIndex*2};
    switch lower(parameterName)
        case 'stoppingcriterion'
            stoppingCriterion = parameterValue;
        case 'initialization'
            x0 = parameterValue;
            if ~all(size(x0)==[n,1])
                error('The dimension of the initial x0 does not match.');
            end
        case 'groundtruth'
            xG = parameterValue;
        case 'mu'
            mu = parameterValue;
        case 'gamma'
            gamma = parameterValue;
        case 'maxiteration'
            maxIterOuter = parameterValue;
        case 'isnonnegative'
            isNonnegative = parameterValue;
        case 'tolerance'
            tol = parameterValue;
        case 'verbose'
            verbose = parameterValue;
        case 'maxtime'
            maxTime = parameterValue;
        case 'recdata'
            recData = parameterValue;
        otherwise
            error(['The parameter ''' parameterName ''' is not recognized by the function ''' mfilename '''.']);
    end
end
clear varargin

% Augmented dictionary for the cross-and-bouquet model.
B = [A, eye(size(A,1))];

% NOTE(review): this deliberately overrides any user-supplied 'mu' with the
% benchmark's fixed smoothing parameter.
mu = .01;
opts.tol = 1e-10;
opts.xG = xG;
opts.maxTime = maxTime;
opts.t0 = t0;

opts.recData = recData;

[x, odata, opts] = tfocs_SCD( prox_l1, { B, -b }, prox_l2(1e-6), mu, [], [], opts );

timeSteps = odata.timeSteps;
errorSteps = odata.errorSteps;
idSteps = odata.idSteps;
nIter = odata.niter;

% Split the stacked solution back into signal and error parts.
x_out = x(1:size(A,2));
e_out = x((size(A,2)+1):end);
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/multMatrAdj_zhou.m:
--------------------------------------------------------------------------------
function y=multMatrAdj_zhou(A,x)
% Adjoint product for the augmented matrix B = [A, I]:
% returns y = B'*x, i.e. the stack [A'*x; x].
%
% Call: y=multMatrAdj(A,x)

y = [A'*x; x];
--------------------------------------------------------------------------------
/l1benchmark/L1Solvers/multMatr_zhou.m:
--------------------------------------------------------------------------------
function y=multMatr_zhou(A,x)
% Forward product for the augmented matrix B = [A, I]:
% splits x into [x1; e] and returns y = A*x1 + e.
%
% Call: y=multMatr(A,x)

nCols = size(A,2);
y = A*x(1:nCols) + x(nCols+1:end);
--------------------------------------------------------------------------------
/l1benchmark/compare_noise_free.m:
--------------------------------------------------------------------------------
%% Compare speeds of algorithms on Basis Pursuit problem
% min ||x||_1 s.t. b = Ax
%
% For each trial a random dictionary A and k-sparse signal x are drawn,
% every solver is run with a wall-clock budget of maxTime seconds, and the
% per-iteration error traces are resampled onto a common 10 ms grid and
% averaged across trials into a relative-error-vs-time plot.

clear ;
clc ;

addpath L1Solvers;
% You need to download the TFOCS and SESOP_PACK packages from their respective authors
% NOTE(review): Windows-style backslash paths; adjust for other platforms.
addpath \TFOCS;
addpath(genpath('\SESOP_PACK'));


%% Initialize variables

% l1Method = {'Homotopy'};
l1Method = {'Homotopy', 'PALM', 'DALM', 'PDIPA', 'L1LS', 'FISTA', 'SesopPCD','AMP', 'TFOCS'} ;
methodColor = {'r','g','b','m','c','k','r--','b--','k--'} ;

numTrials = 20 ; % no. of trials for averaging speed

numMethods = length(l1Method) ;

timeTaken = zeros(numTrials,numMethods) ;
errorEst = zeros(numTrials,numMethods) ;

avgTime = zeros(1,numMethods) ;

avgError = zeros(1,numMethods) ;

d = 800 ; % no. of observations
n = 1000 ; % length of unknown signal (x)
k = 100 ; % no. of non-zero entries in x

STOPPING_TIME = -2 ;
STOPPING_GROUND_TRUTH = -1;
STOPPING_DUALITY_GAP = 1;
STOPPING_SPARSE_SUPPORT = 2;
STOPPING_OBJECTIVE_VALUE = 3;
STOPPING_SUBGRADIENT = 4;

maxTime = 8;  % per-solver wall-clock budget in seconds

%% Run various algorithms
timeEst = cell(numTrials,numMethods);   % per-trial iteration timestamps
errEst = cell(numTrials,numMethods);    % per-trial relative-error traces

for trialIndex = 1 : numTrials

    disp(['After Trial ' num2str(trialIndex)]) ;

    % Generate random data and observations
    A = randn(d,n) ; % observation matrix

    % Normalize each column of A to unit l2 norm.
    for i = 1 : n
        A(:,i) = A(:,i)/norm(A(:,i)) ;
    end

    x = zeros(n,1) ;
    p = randperm(n) ;
    x(p(1:k)) = 20*(rand(k,1)-0.5) ; % signal to be estimated

    b = A * x ; % observation vector b

    for methodIndex = 1 : numMethods

        % Resolve the solver function Solve<MethodName> by name.
        methodName = ['Solve' l1Method{methodIndex}] ;

        methodHandle = str2func(methodName) ;

        tic ;
        [xEst, nIter, curTimeEst, curErrEst] = methodHandle(A, b, 'stoppingCriterion', STOPPING_TIME,...
            'groundTruth', x, 'maxtime', maxTime, 'maxiteration', 1e6) ;
        tEst = toc ;

        timeEst{trialIndex,methodIndex} = curTimeEst;
        errEst{trialIndex,methodIndex} = curErrEst /norm(x);

        estError = norm(x-xEst)/norm(x) ;
        timeTaken(trialIndex,methodIndex) = tEst ;
        errorEst(trialIndex,methodIndex) = estError ;

        avgTime(methodIndex) = avgTime(methodIndex) + tEst ;
        avgError(methodIndex) = avgError(methodIndex) + estError ;

        % Report running averages over the trials completed so far.
        fprintf(1,'%10s\t\t%e\t\t%e\n',l1Method{methodIndex},avgTime(methodIndex)/trialIndex,...
            avgError(methodIndex)/trialIndex) ;
        pause(0.2) ;

    end

    disp('----------------') ;

    avgCurve = cell(1, numMethods);
    avgCount = cell(1, numMethods);

    % Resample each trial's (time, error) trace onto a 10 ms grid and
    % average across the trials completed so far, then replot.
    figure(1) ; clf ;
    for methodIndex = 1 : numMethods
        avgCurve{methodIndex} = zeros(1,maxTime * 100);
        avgCount{methodIndex} = zeros(1,maxTime * 100);
        for tIdx = 1 : trialIndex
            curTimeEst = timeEst{tIdx,methodIndex};
            curErrEst = errEst{tIdx,methodIndex};

            idx_length = min(maxTime * 100, (100*max(curTimeEst)+1));
            cur_pt = 1;
            for idx0 = 1:idx_length
                % Advance to the last sample taken before grid time 0.01*idx0.
                while cur_pt <= length(curErrEst) && curTimeEst(cur_pt) < .01 * idx0
                    cur_pt = cur_pt + 1;
                end
                if cur_pt > 1
                    avgCurve{methodIndex}(idx0) = avgCurve{methodIndex}(idx0) + curErrEst(cur_pt-1);
                else
                    % No sample yet: count the initial relative error as 1.
                    avgCurve{methodIndex}(idx0) = avgCurve{methodIndex}(idx0) + 1;
                end
                avgCount{methodIndex}(idx0) = avgCount{methodIndex}(idx0) + 1;
            end

        end
        % Trim the grid to the last populated bin before averaging.
        maxCount = max(find(avgCount{methodIndex} ~= 0));
        avgCount{methodIndex} = avgCount{methodIndex}(1:maxCount);
        avgCurve{methodIndex} = avgCurve{methodIndex}(1:maxCount) ./ avgCount{methodIndex};

        semilogy(0.01*(1:maxCount),avgCurve{methodIndex},methodColor{methodIndex},'LineWidth',3) ;
        hold on;
    end
    xlim([0,maxTime]);
    ylim([1e-20 1]);
    set(gca,'fontsize',16);
    legend(l1Method,'fontsize',12);
    xlabel('Time(s)','fontsize',20);
    ylabel('Relative Error of x','fontsize',20);
    hold off;
    pause(.1);
end

avgTime = avgTime/numTrials ;
avgError = avgError/numTrials ;
--------------------------------------------------------------------------------
/smop/__init__.py:
--------------------------------------------------------------------------------
1 | # SMOP compiler -- Simple Matlab/Octave to Python compiler
2 | # Copyright 2011-2014 Victor Leikehman
3 |
4 | import version
5 | import parse,resolve,backend,main
6 | from version import __version__
7 |
--------------------------------------------------------------------------------
/smop/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/__init__.pyc
--------------------------------------------------------------------------------
/smop/backend.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/backend.pyc
--------------------------------------------------------------------------------
/smop/benchmark5/benchmark2.m:
--------------------------------------------------------------------------------
%***************************************************************************
%* Matlab Benchmark program version 5.0                                    *
%* Author : Stefan Steinhaus                                               *
%* EMAIL : stefan@steinhaus-net.de                                         *
%* This program is public domain. Feel free to copy it freely.             *
%***************************************************************************
% NOTE(review): reconstructed from a line-mangled (PDF-extracted) copy;
% embedded page numbers were removed and split tokens rejoined.
clc
disp('The following benchmark program will print the average timings')
disp('to calculate the functions by 3 runs.')
disp(' ')
disp(' ')
disp('!!! MATLAB - Benchmarkprogram !!!')
disp('=================================')
disp(' ')
%* Misc. operation *
result=0; runs=3;
for i=1:runs;
   tic;
   datmat=importdata('currency2.txt',' ');
   Limits_Area=[1,261,522,784,1045,1305,1565,1827,2088,2349,2610,2871,3131,3158];
   for k=1:2000;
      for j=1:13;
         Year_Data=datmat.data(Limits_Area(j):Limits_Area(j+1)-1,4:37);
         Min_Year=min(Year_Data);
         Max_Year=max(Year_Data);
         Mean_Year=mean(Year_Data);
         GainLoss_Year=100-(Year_Data(1,:)+0.00000001)./(Year_Data(end,:)+0.00000001)*100;
      end;
   end;
   result=result+toc;
end;
result=result/runs;
disp(['IO test & descriptive statistics_____________________ : ' num2str(result) ' sec.'])
result=0; a=1;
for i=1:runs
   tic;
   for x=1:15000;
      for y=1:15000;
         a=a+x+y;
      end;
   end;
   result=result+toc;
end;
result=result/runs;
disp(['Loop testing_________________________________________ : ' num2str(result) ' sec.'])
result=0;
for i=1:runs
   for j=1:200
      b=1+(rand(2000,2000)/100);
      tic;
      a=b.^1000;
      result=result+toc;
   end
end
result=result/runs;
disp(['2000x2000 normal distributed random matrix^1000______ : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1000000,1);
      tic;
      b=sort(a);
      result=result+toc;
   end
end
result=result/runs;
disp(['1000000 values sorted ascending______________________ : ' num2str(result) ' sec.'])
%* Analysis *
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1048576,1);
      tic;
      b=fft(a);
      result=result+toc;
   end
end
result=result/runs;
disp(['FFT over 1048576 values (2^20)_______________________ : ' num2str(result) ' sec.'])
%* Algebra *
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1500,1500);
      tic;
      b=det(a);
      result=result+toc;
   end
end
result=result/runs;
disp(['Determinant of a 1500x1500 random matrix_____________ : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1500,1500);
      tic;
      b=inv(a);
      result=result+toc;
   end
end
result=result/runs;
disp(['Inverse of a 1500x1500 uniform distr. random matrix__ : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   a=rand(1200,1200);
   tic;
   c=eig(a);
   result=result+toc;
end
result=result/runs;
disp(['Eigenval. of a normal distr. 1200x1200 randommatrix___ : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1500,1500);
      a=a'*a;   % make symmetric positive definite so chol succeeds
      tic;
      b=chol(a);
      result=result+toc;
   end
end
result=result/runs;
disp(['Cholesky decomposition of a 1500x1500-matrix_________ : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1500,1500);
      tic;
      b=a'*a;
      result=result+toc;
   end
end
result=result/runs;
disp(['1500x1500 cross-product matrix_______________________ : ' num2str(result) ' sec.'])
%* Number theory *
clear a; clear b;
phi = 1.6180339887498949; result=0;
for i=1:runs;
   a=floor(1000*rand(10000000,1));
   tic;
   % Binet's closed form for the Fibonacci numbers.
   b=(phi.^a-(-phi).^(-a))/sqrt(5);
   result=result+toc;
end
result=result/runs;
disp(['Calculation of 10000000 fibonacci numbers____________ : ' num2str(result) ' sec.'])
%* Stochastic-statistic *
result=0; a=0; b=0;
for i=1:runs;
   a=rand(10000,1000);
   tic;
   b=princomp(a);
   result=result+toc;
end;
result=result/runs;
disp(['Calc. of the principal components of a 10000x1000 matrix : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1500,1500);
      tic;
      b=gamma(a);
      result=result+toc;
   end
end
result=result/runs;
disp(['Gamma function over a 1500x1500 matrix_______________ : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=rand(1500,1500);
      tic;
      b=erf(a);
      result=result+toc;
   end
end
result=result/runs;
disp(['Gaussian error function over a 1500x1500 matrix______ : ' num2str(result) ' sec.'])
clear a; clear b; result=0;
for i=1:runs
   for j=1:100
      a=randn(1000,1000);
      b=1:1000;
      tic;
      b=a\b';
      result=result+toc;
   end
end
result=result/runs;
disp(['Linear regression over a 1000x1000 matrix____________ : ' num2str(result) ' sec.'])
--------------------------------------------------------------------------------
/smop/benchmark5/database.mdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/benchmark5/database.mdb
--------------------------------------------------------------------------------
/smop/callgraph.py:
--------------------------------------------------------------------------------
1 | import parse,sys
2 | import networkx as nx
3 | import node,resolve
4 |
def callgraph(func_list):
    """
    Build callgraph of func_list, ignoring
    built-in functions
    """
    graph = nx.DiGraph()
    # Register every function name first, so edges are only added for
    # calls that target functions defined in this list.
    for func in func_list:
        graph.add_node(func.head.ident.name)
    for func in func_list:
        assert isinstance(func, node.function)
        caller = func.head.ident.name
        resolve.resolve(func)
        for stmt in node.postorder(func):
            is_local_call = (stmt.__class__ is node.funcall and
                             stmt.func_expr.__class__ is node.ident and
                             stmt.func_expr.name in graph.nodes())
            if is_local_call:
                graph.add_edge(caller, stmt.func_expr.name)
    return graph
23 |
# Module-level scratch graph; not used by the functions below (foo and
# callgraph each build their own local DiGraph). NOTE(review): candidate
# for removal.
G = nx.DiGraph()
25 |
def postorder_edge(u):
    """Recursively yield (child, descendant-pair) tuples for every AST node
    under u, finishing with the pair (u, u); only node.node instances are
    descended into."""
    if isinstance(u, node.node):
        for child in u:
            for pair in postorder_edge(child):
                yield (child, pair)
    yield (u, u)  # returns only traversible objects
32 |
def foo(tree):
    """Render the nesting structure of an AST as a DiGraph whose vertices
    are the object ids of the tree's nodes."""
    graph = nx.DiGraph()
    for parent, child in postorder_edge(tree):
        graph.add_edge(id(parent), id(child))
    return graph
38 |
def main():
    """Parse the Matlab file named on the command line and dump its
    structure graph in Graphviz "dot" format to G.dot."""
    # Fix: the original open(...).read() leaked the file handle.
    with open(sys.argv[1]) as fp:
        func_list = parse.parse(fp.read())
    G = foo(func_list)
    #G = callgraph(func_list)
    nx.write_dot(G,"G.dot")
    #H = nx.dfs_tree(G,'solver')
    #nx.write_dot(H,"H.dot")
    #print nx.topological_sort(H)
47 |
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
50 |
--------------------------------------------------------------------------------
/smop/core.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/core.pyc
--------------------------------------------------------------------------------
/smop/foo.pyx:
--------------------------------------------------------------------------------
1 | # -*- python -*-
2 | #import runtime as rt
3 | import numpy as np
4 | cimport numpy as np
5 | from runtime import *
6 |
cdef class foo:
    # Thin wrapper over a 2-D int memoryview that exposes 1-based
    # (Matlab-style) element access via obj[i, j].
    cdef int[:,:] data

    def __init__(self, int[:,:] a):
        self.data = a

    def __getitem__(self,tuple index):
        cdef int i = index[0]
        cdef int j = index[1]
        # Shift from 1-based Matlab indices to 0-based C indices.
        return self.data[i-1,j-1]
17 |
cdef class bar:
    # Same 1-based indexing wrapper as `foo`, but over a double memoryview.
    cdef double[:,:] data

    def __init__(self, double[:,:] a):
        self.data = a

    def __getitem__(self,tuple index):
        cdef int i = index[0]
        cdef int j = index[1]
        # Shift from 1-based Matlab indices to 0-based C indices.
        return self.data[i-1,j-1]
28 |
# Return the 1-based (row, col) of the first true entry of boolean array
# `a`, or (0, 0) when none is set.
# NOTE(review): k walks a.data in raw memory order while unravel_index
# uses order="F" -- correct only for Fortran-contiguous input; confirm.
cdef tuple find(np.ndarray[char,ndim=2,cast=True] a): # 38
#cdef tuple find(np.ndarray a):
    cdef int i,j,k
    cdef char* p = a.data
    for k in range(a.size):
        print(k,p[k])  # NOTE(review): debug print left in the scan loop
        if p[k]:
            i,j = np.unravel_index(k, (a.shape[0],a.shape[1]), order="F")
            return i+1,j+1
    return 0,0
39 |
# Smoke test: exercise 1-based indexing through matlabarray and both typed
# wrapper classes, then locate a value with find().
a = matlabarray([[1,2],[3,4]],dtype=int)
print(a)
b = matlabarray([[1,2],[3,4]],dtype=float)
print(b)
c = foo(a)
d = bar(b)

# Writes through the matlabarray are visible through the memoryview wrappers.
a[1,1] = 11
b[1,1] = 1.11

print(a[1,1], b[1,1], c[1,1], d[1,1])

i,j = find(a == 1)  # NOTE(review): a[1,1] was overwritten to 11 above
print(i,j)
54 |
--------------------------------------------------------------------------------
/smop/go.m:
--------------------------------------------------------------------------------
% Driver script: build initial/final 10x10 grids with five labelled cells
% each, time the solver on them, and report the move-list size.
ai = zeros(10,10);   % initial configuration
af = zeros(10,10);   % final configuration

% Labels 1..5 on the diagonal near the top-left corner of the start grid.
ai(5,5)=1; ai(1,1)=2; ai(2,2)=3; ai(3,3)=4; ai(4,4)=5;

% The same labels near the bottom-right corner of the goal grid.
af(9,9)=1; af(8,8)=2; af(7,7)=3; af(6,6)=4; af(10,10)=5;

tic;
mv = solver(ai,af,0);
toc

disp(size(mv));
21 |
--------------------------------------------------------------------------------
/smop/go.py:
--------------------------------------------------------------------------------
1 | import pstats,cProfile
2 | import numpy,time
3 | import solver as a
4 | from core import *
5 |
def main():
    # Python counterpart of go.m: build initial/final 10x10 integer grids
    # with five labelled cells each, run the SMOP-translated solver, and
    # print the elapsed time and the shape of the returned move array.
    ai = matlabarray(zeros (10,10,dtype=int),dtype=int)  # 1-based Matlab-style array
    af = copy(ai)

    # labels 1..5 on the diagonal near the top-left corner
    ai[1,1]=2
    ai[2,2]=3
    ai[3,3]=4
    ai[4,4]=5
    ai[5,5]=1

    # the same labels near the bottom-right corner
    af[9,9]=1
    af[8,8]=2
    af[7,7]=3
    af[6,6]=4
    af[10,10]=5

    # NOTE(review): time.clock() was removed in Python 3.8; this file is
    # Python 2 code (print statements below).
    t0 = time.clock()
    mv = a.solver(ai,af,0)
    t1 = time.clock()
    print t1-t0
    print mv.shape
27 |
if __name__ == '__main__':
    main()
# Profiling variant kept for reference; swap it in for the main() call above.
"""
cProfile.runctx('main()',globals(),locals(),"Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
"""
35 |
--------------------------------------------------------------------------------
/smop/graphviz.py:
--------------------------------------------------------------------------------
1 | import parse,sys
2 | import node
3 | from node import extend
4 | import networkx as nx
5 |
6 |
7 |
def resolve(t,fp,func_name):
    """Write a def-use graph of the identifiers in tree *t* to *fp* as
    Graphviz "dot" text; edges from a use back to its definitions, drawn
    red when the use precedes the definition."""
    fp.write("digraph %s {\n" % func_name)
    fp.write('graph [rankdir="LR"];\n')
    for u in node.postorder(t):
        if u.__class__ not in (node.ident, node.param):
            continue
        fp.write("%s [label=%s_%s_%s];\n" % (u.lexpos, u.name, u.lineno, u.column))
        if not u.defs:
            continue
        for v in u.defs:
            fp.write("%s -> %s" % (u.lexpos, v.lexpos))
            if u.lexpos < v.lexpos:
                fp.write('[color=red]')
            #else:
            #    fp.write('[label=%s.%s]' % (v.lineno,v.column))
            fp.write(';\n')
    fp.write("}\n")
23 |
@extend(node.node)
def _graphviz(self,fp):
    """Emit this AST node to *fp* as a Graphviz record node, with one
    <fN> port per slot/child so the child edges written below can attach
    to the matching field."""
    # Fix: the original one-argument formats (' %s' % (i+1,s) and
    # ' %d' % (i+1,i+1)) raised TypeError; the '<f%d>' port markup that
    # the '"%s":f%d' edge writer below relies on had evidently been
    # stripped from the label strings.
    if getattr(self,"__slots__",False):
        fp.write('"%s" [' % id(self))
        fp.write('label = " %s|' % self.__class__.__name__)
        fp.write('|'.join(['<f%d> %s' % (i+1,s) for i,s in enumerate(self.__slots__)]))
        fp.write('"]\n')
    else:
        fp.write('"%s" [' % id(self))
        fp.write('label = " %s|' % self.__class__.__name__)
        fp.write('|'.join(['<f%d> %d' % (i+1,i+1) for i in range(len(self))]))
        fp.write('"]\n')

    for i,s in enumerate(self):
        if not isinstance(s,node.node):
            fp.write('"%s" [label="%s",shape=ellipse]\n' % (id(s),s))
        if not s in [None,0,"",False]:
            fp.write('"%s":f%d -> "%s";\n' % (id(self), i+1, id(s)))
42 |
@extend(node.number)
def _graphviz(self,fp):
    # Render a numeric literal as a two-field record: class name | value.
    fp.write('"%s" [label=" %s| %s"]\n' %
             (id(self), self.__class__.__name__,self.value))
47 |
@extend(node.ident)
def _graphviz(self,fp):
    # Render an identifier as a two-field record: class name | name.
    fp.write('"%s" [label=" %s| %s"]\n' %
             (id(self), self.__class__.__name__,self.name))
52 |
def graphviz(tree,fp):
    """Dump the entire AST *tree* to *fp* as a strict Graphviz digraph of
    record-shaped nodes, laid out left to right."""
    fp.write('''strict digraph g {
graph [rankdir="LR"];
node [shape=record];
''')
    for item in node.postorder(tree):
        item._graphviz(fp)
    fp.write("}\n")
61 |
# NOTE(review): this module defines no main(); running it directly raises
# NameError. A driver presumably belongs here (parse argv, call graphviz()).
if __name__ == '__main__':
    main()
64 |
--------------------------------------------------------------------------------
/smop/graphviz.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/graphviz.pyc
--------------------------------------------------------------------------------
/smop/homo.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/homo.pyc
--------------------------------------------------------------------------------
/smop/lex.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/lex.pyc
--------------------------------------------------------------------------------
/smop/lexer.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/lexer.pyc
--------------------------------------------------------------------------------
/smop/lm.py:
--------------------------------------------------------------------------------
# Introspection helper (Python 2): for every public name in module `a`,
# print the file/line where its code object was defined plus the global
# names it references; non-function attributes print "***".
import a,os
u = set()  # NOTE(review): never populated; the final print is commented out
for f in [s for s in dir(a) if not s.startswith("__")]:
    try:
        codeobj = a.__dict__[f].func_code
        filename = os.path.split(codeobj.co_filename)[1]
        print "%s %s:%d" % (f, filename, codeobj.co_firstlineno)
        print "\t"+"\n\t".join(sorted(codeobj.co_names))
    except:
        # Attributes without func_code (classes, data) land here.
        print f, "***"
#print sorted(u)
--------------------------------------------------------------------------------
/smop/main.py:
--------------------------------------------------------------------------------
1 | # SMOP compiler -- Simple Matlab/Octave to Python compiler
2 | # Copyright 2011-2013 Victor Leikehman
3 |
4 | import version
5 | import sys,cPickle,glob,os
6 | import getopt,re
7 | import lexer,parse,resolve,backend,options,node,graphviz
8 | import networkx as nx
9 | import readline
10 | #from runtime import *
11 | #from version import __version__
12 | __version__ = version.__version__
13 |
14 | def usage():
15 | print "SMOP compiler version " + __version__
16 | print """Usage: smop [options] file-list
17 | Options:
18 | -V --version
19 | -X --exclude=FILES Ignore files listed in comma-separated list FILES.
20 | Can be used several times.
21 | -S --syntax-errors=FILES Ignore syntax errors in comma-separated list of FILES.
22 | Can be used several times.
23 | -S. Always gnore syntax errors
24 | -d --dot=REGEX For functions whose names match REGEX, save debugging
25 | information in "dot" format (see www.graphviz.org).
26 | You need an installation of graphviz to use --dot
27 | option. Use "dot" utility to create a pdf file.
28 | For example:
29 | $ python main.py fastsolver.m -d "solver|cbest"
30 | $ dot -Tpdf -o resolve_solver.pdf resolve_solver.dot
31 | -h --help
32 | -o --output=FILENAME By default create file named a.py
33 | -o- --output=- Use standard output
34 | -s --strict Stop on the first error
35 | -v --verbose
36 | """
37 |
38 | def main():
39 | """
40 | !a="def f(): \\n\\treturn 123"
41 | !exec a
42 | !print f
43 | !print f()
44 | !reload(backend)
45 | =>> function t=foo(a) \\
46 | ... t=123
47 | !exec foo(3)
48 | """
49 | try:
50 | opts, args = getopt.gnu_getopt(sys.argv[1:],
51 | "d:ho:vVsr:S:X:",
52 | [
53 | "dot=",
54 | "exclude=",
55 | "help",
56 | "syntax-errors=",
57 | "output=",
58 | "runtime=",
59 | "strict",
60 | "verbose",
61 | "version",
62 | ])
63 | except getopt.GetoptError, err:
64 | # print help information and exit:
65 | print str(err) # will print something like "option -a not recognized"
66 | usage()
67 | sys.exit(2)
68 |
69 | exclude_list = []
70 | output = None
71 | strict = 0
72 | dot = None
73 | runtime = []
74 |
75 | for o, a in opts:
76 | if o in ("-r", "--runtime"):
77 | runtime += [a]
78 | elif o in ("-s", "--strict"):
79 | strict = 1
80 | elif o in ("-S", "--syntax-errors"):
81 | options.syntax_errors += a.split(",")
82 | elif o in ("-d", "--dot"):
83 | dot = re.compile(a)
84 | elif o in ("-X", "--exclude"):
85 | exclude_list += [ "%s.m" % b for b in a.split(",")]
86 | elif o in ("-v", "--verbose"):
87 | options.verbose += 1
88 | elif o in ("-V", "--version"):
89 | print "SMOP compiler version " + __version__
90 | sys.exit()
91 | elif o in ("-h", "--help"):
92 | usage()
93 | sys.exit()
94 | elif o in ("-o", "--output"):
95 | output = a
96 | else:
97 | assert False, "unhandled option"
98 |
99 | """
100 | if not args:
101 | usage()
102 | sys.exit()
103 | """
104 | if not args:
105 | symtab = {}
106 | print "? for help"
107 | while 1:
108 | try:
109 | buf = raw_input("octave: ")
110 | if not buf:
111 | continue
112 | while buf[-1] == "\\":
113 | buf = buf[:-1] + "\n" + raw_input("... ")
114 | if buf[0] == '?':
115 | print main.__doc__
116 | continue
117 | if buf[0] == "!":
118 | try:
119 | exec buf[1:]
120 | except Exception as ex:
121 | print ex
122 | continue
123 | t = parse.parse(buf if buf[-1]=='\n' else buf+'\n')
124 | if not t:
125 | continue
126 | print "t=", repr(t)
127 | print 60*"-"
128 | resolve.resolve(t,symtab)
129 | print "t=", repr(t)
130 | print 60*"-"
131 | print "symtab:",symtab
132 | s = backend.backend(t)
133 | print "python:",s.strip()
134 | try:
135 | print eval(s)
136 | except SyntaxError:
137 | exec s
138 | except EOFError:
139 | return
140 | except Exception as ex:
141 | print ex
142 |
143 | if not output:
144 | output = "a.py"
145 | fp = open(output,"w") if output != "-" else sys.stdout
146 | print >> fp, "# Autogenerated with SMOP version " + __version__
147 | print >> fp, "# " + " ".join(sys.argv)
148 | print >> fp, "from __future__ import division"
149 | for a in runtime:
150 | print >> fp, "from %s import *" % a
151 |
152 | for pattern in args:
153 | for filename in glob.glob(os.path.expanduser(pattern)):
154 | if not filename.endswith(".m"):
155 | print "\tIngored file: '%s'" % filename
156 | continue
157 | if os.path.basename(filename) in exclude_list:
158 | print "\tExcluded file: '%s'" % filename
159 | continue
160 | if options.verbose:
161 | print filename
162 | buf = open(filename).read().replace("\r\n","\n")
163 | func_list = parse.parse(buf if buf[-1]=='\n' else buf+'\n',filename)
164 | if not func_list and strict:
165 | sys.exit(-1)
166 |
167 | for func_obj in func_list:
168 | try:
169 | func_name = func_obj.head.ident.name
170 | if options.verbose:
171 | print "\t",func_name
172 | except AttributeError:
173 | if options.verbose:
174 | print "\tJunk ignored"
175 | if strict:
176 | sys.exit(-1)
177 | continue
178 | fp0 = open("parse_"+func_name+".dot","w") if dot and dot.match(func_name) else None
179 | if fp0:
180 | graphviz.graphviz(func_obj,fp0)
181 | if options.do_resolve:
182 | G = resolve.resolve(func_obj)
183 |
184 | for func_obj in func_list:
185 | s = backend.backend(func_obj)
186 | print >> fp, s
187 |
188 | if __name__ == "__main__":
189 | main()
190 |
--------------------------------------------------------------------------------
/smop/main.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/main.pyc
--------------------------------------------------------------------------------
/smop/node.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/node.pyc
--------------------------------------------------------------------------------
/smop/options.py:
--------------------------------------------------------------------------------
1 | subscripts = "square" # "round" -- emit a[i] vs a(i) style indexing in output
2 | 
3 | """Row vectors in Matlab can be expressed in Fortran as
4 | either one or two dimensional arrays"""
5 | row_vector_ndim = 1 # 2
6 | 
7 | """Given Matlab code such as [a b c]=size(X) we heuristically
8 | decide that X is 3-dimensional."""
9 | rank_guess_from_size = True
10 | 
11 | """Given false(1,n), deduce that n is a scalar"""
12 | rank_backward_propagate=1
13 | 
14 | """0=not even constants
15 | 1=char constants supported
16 | """
17 | has_char_constants = 0
18 | 
19 | do_allocate = 0  # compiler pass toggles (0/1)
20 | do_resolve = 1  # use-def resolution pass
21 | do_rewrite = 0  # tree rewriting pass
22 | do_rename = 0 # SSA
23 | do_typeof = 0
24 | do_listing = 0
25 | 
26 | debug = False
27 | 
28 | uppercase=True # active only with f90
29 | 
30 | line_numbering=True # uses either # or ! or % depending on target language
31 | 
32 | syntax_errors = []  # files whose syntax errors are ignored (-S flag)
33 | verbose = 0  # incremented once per -v flag
34 | filename = ""  # file currently being compiled
--------------------------------------------------------------------------------
/smop/options.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/options.pyc
--------------------------------------------------------------------------------
/smop/parse.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/parse.pyc
--------------------------------------------------------------------------------
/smop/parsetab.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/parsetab.pyc
--------------------------------------------------------------------------------
/smop/r8_random.m:
--------------------------------------------------------------------------------
1 | function [ r, s1, s2, s3 ] = r8_random ( s1, s2, s3 )
2 | 
3 | %*****************************************************************************80
4 | %
5 | %% R8_RANDOM returns a pseudorandom number between 0 and 1.
6 | %
7 | % Discussion:
8 | %
9 | %    This function returns a pseudo-random number rectangularly distributed
10 | %    between 0 and 1.   The cycle length is 6.95E+12.  (See page 123
11 | %    of Applied Statistics (1984) volume 33), not as claimed in the
12 | %    original article.
13 | %
14 | % Licensing:
15 | %
16 | %    This code is distributed under the GNU LGPL license.
17 | %
18 | % Modified:
19 | %
20 | %    08 July 2008
21 | %
22 | % Author:
23 | %
24 | %    Original FORTRAN77 version by Brian Wichman, David Hill.
25 | %    MATLAB version by John Burkardt.
26 | %
27 | % Reference:
28 | %
29 | %    Brian Wichman, David Hill,
30 | %    Algorithm AS 183: An Efficient and Portable Pseudo-Random
31 | %    Number Generator,
32 | %    Applied Statistics,
33 | %    Volume 31, Number 2, 1982, pages 188-190.
34 | %
35 | % Parameters:
36 | %
37 | %    Input, integer S1, S2, S3, three values used as the
38 | %    seed for the sequence.  These values should be positive
39 | %    integers between 1 and 30,000.
40 | %
41 | %    Output, real R, the next value in the sequence.
42 | %
43 | %    Output, integer S1, S2, S3, updated seed values.
44 | %
45 | s1 = mod ( 171 * s1, 30269 );  % three independent linear congruential streams
46 | s2 = mod ( 172 * s2, 30307 );
47 | s3 = mod ( 170 * s3, 30323 );
48 | 
49 | r = mod ( s1 / 30269.0 ...
50 |   + s2 / 30307.0 ...
51 |   + s3 / 30323.0, 1.0 );  % combined fractional part is the uniform deviate
52 | 
53 | return
54 | end
55 |
56 |
--------------------------------------------------------------------------------
/smop/rank.py:
--------------------------------------------------------------------------------
1 | import operator
2 |
3 | from constraint import Problem,RecursiveBacktrackingSolver
4 | import networkx as nx
5 | import node
6 | from node import extend
7 | import resolve
8 |
9 | def rank(tree):  # infer array ranks (0..3) for identifiers via constraint solving
10 |     @extend(node.number)
11 |     def _rank(self):
12 |         problem.addVariable(id(self),[0])  # numeric literals are scalars: rank 0
13 | 
14 |     @extend(node.let)
15 |     def _rank(self):
16 |         if isinstance(self.ret,node.ident):
17 |             # plain assignment -- not a field, lhs indexing
18 |             vars = [id(self.ret), id(self.args)]
19 |             try:
20 |                 problem.addVariables(vars,range(4))
21 |                 problem.addConstraint(operator.__eq__,vars)  # lhs rank == rhs rank
22 |             except ValueError:
23 |                 pass  # variable already registered with the solver
24 |         else:
25 |             # lhs indexing or field
26 |             pass
27 | 
28 |     @extend(node.for_stmt)
29 |     def _rank(self):
30 |         vars = [id(self.ident), id(self.expr)]
31 |         problem.addVariables(vars,range(4))
32 |         problem.addConstraint((lambda u,v: u+1==v),vars)  # loop var is one rank below iterand
33 | 
34 |     @extend(node.if_stmt)
35 |     def _rank(self):
36 |         # could use operator.__not__ instead of lambda expression
37 |         problem.addVariable(id(self.cond_expr),range(4))
38 |         problem.addConstraint(lambda t: t==0, [id(self.cond_expr)])  # conditions are scalar
39 | 
40 |     @extend(node.ident)
41 |     def _rank(self):
42 |         try:
43 |             x = id(self)
44 |             problem.addVariable(x,range(4))
45 |             for other in self.defs:  # every definition of this name shares one rank
46 |                 y = id(other)
47 |                 try:
48 |                     problem.addVariable(y,range(4))
49 |                 except ValueError:
50 |                     pass
51 |                 problem.addConstraint(operator.__eq__, [x,y])
52 |         except:
53 |             print "Ignored ",self  # NOTE(review): bare except hides real failures
54 |     """
55 | 
56 |     @extend(funcall)
57 |     def rank(self,problem):
58 |         if not isinstance(self.func_expr,ident):
59 |             # In MATLAB, chaining subscripts, such as size(a)(1)
60 |             # is not allowed, so only fields and dot expressions
61 |             # go here.  In Octave, chaining subscripts is allowed,
62 |             # and such expressions go here.
63 |             return
64 |         try:
65 |             if defs.degree(self.func_expr):
66 |                 # If a variable is defined, it is not a function,
67 |                 # except function handle usages, such as
68 |                 #    foo=@size; foo(17)
69 |                 # which is not handled properly yet.
70 |                 x = id(self.func_expr)
71 |                 n = len(self.args)
72 |                 problem.addVariable(x,range(4))
73 |                 problem.addConstraint((lambda u: u>=n),[x])
74 |                 return
75 |         except TypeError: # func_expr is unhashable
76 |             # For example [10 20 30](2)
77 |             return
78 |         except KeyError:
79 |             # See tests/clear_margins.m
80 |             return
81 |         assert getattr(self.func_expr,"name",None)
82 |         # So func_expr is an undefined variable, and we understand
83 |         # it's a function call -- either builtin or user-defined.
84 |         name = self.func_expr.name
85 |         # if name not in builtins:
86 |         #     # User-defined function
87 |         #     return
88 |         # builtins[name](self,problem)
89 |         #
90 |     #@extend(expr)
91 |     #def rank(self,problem):
92 |     #    try:
93 |     #        builtins[self.op](self,problem)
94 |     #    except:
95 |     #        pass
96 |     """
97 | 
98 |     problem = Problem(RecursiveBacktrackingSolver())
99 |     for v in node.postorder(tree):
100 |         for u in v:
101 |             try:
102 |                 u._rank()
103 |             except AttributeError:
104 |                 pass  # node kind has no _rank rule registered above
105 |     s = problem.getSolution()
106 |     if not s:
107 |         print "No solutions"
108 |     else:
109 |         d = set()
110 |         #for k in sorted(G.nodes(), key=lambda t: (t.name,t.lexpos)):
111 |         for k in node.postorder(tree):
112 |             if isinstance(k,node.ident):
113 |                 print k.name,k.lineno, s.get(id(k),-1)  # -1 means no rank inferred
114 |                 #if not k.name in d and s[id(k)]:
115 |                 #    print "%s(%d)" % (k.name,s[id(k)])
116 |                 #    d.add(k.name)
--------------------------------------------------------------------------------
/smop/recipes.py:
--------------------------------------------------------------------------------
1 | __all__ = ['recordtype']
2 |
3 | import sys
4 | from textwrap import dedent
5 | from keyword import iskeyword
6 |
7 |
8 | def recordtype(typename, field_names, verbose=False, **default_kwds):
9 |     '''Returns a new class with named fields.
10 | 
11 |     @keyword field_defaults: A mapping from (a subset of) field names to default
12 |         values.
13 |     @keyword default: If provided, the default value for all fields without an
14 |         explicit default in `field_defaults`.
15 | 
16 |     >>> Point = recordtype('Point', 'x y', default=0)
17 |     >>> Point.__doc__           # docstring for the new class
18 |     'Point(x, y)'
19 |     >>> Point()                 # instantiate with defaults
20 |     Point(x=0, y=0)
21 |     >>> p = Point(11, y=22)     # instantiate with positional args or keywords
22 |     >>> p[0] + p.y              # accessible by name and index
23 |     33
24 |     >>> p.x = 100; p[1] =200    # modifiable by name and index
25 |     >>> p
26 |     Point(x=100, y=200)
27 |     >>> x, y = p                # unpack
28 |     >>> x, y
29 |     (100, 200)
30 |     >>> d = p.todict()          # convert to a dictionary
31 |     >>> d['x']
32 |     100
33 |     >>> Point(**d) == p         # convert from a dictionary
34 |     True
35 |     '''
36 |     # Parse and validate the field names.  Validation serves two purposes,
37 |     # generating informative error messages and preventing template injection attacks.
38 |     if isinstance(field_names, basestring):
39 |         # names separated by whitespace and/or commas
40 |         field_names = field_names.replace(',', ' ').split()
41 |     field_names = tuple(map(str, field_names))
42 |     if not field_names:
43 |         raise ValueError('Records must have at least one field')
44 |     for name in (typename,) + field_names:
45 |         if not min(c.isalnum() or c=='_' for c in name):  # min over bools == all()
46 |             raise ValueError('Type names and field names can only contain '
47 |                              'alphanumeric characters and underscores: %r' % name)
48 |         if iskeyword(name):
49 |             raise ValueError('Type names and field names cannot be a keyword: %r'
50 |                              % name)
51 |         if name[0].isdigit():
52 |             raise ValueError('Type names and field names cannot start with a '
53 |                              'number: %r' % name)
54 |     seen_names = set()
55 |     for name in field_names:
56 |         if name.startswith('_'):
57 |             raise ValueError('Field names cannot start with an underscore: %r'
58 |                              % name)
59 |         if name in seen_names:
60 |             raise ValueError('Encountered duplicate field name: %r' % name)
61 |         seen_names.add(name)
62 |     # determine the func_defaults of __init__
63 |     field_defaults = default_kwds.pop('field_defaults', {})
64 |     if 'default' in default_kwds:
65 |         default = default_kwds.pop('default')
66 |         init_defaults = tuple(field_defaults.get(f,default) for f in field_names)
67 |     elif not field_defaults:
68 |         init_defaults = None
69 |     else:
70 |         default_fields = field_names[-len(field_defaults):]  # defaults must be trailing
71 |         if set(default_fields) != set(field_defaults):
72 |             raise ValueError('Missing default parameter values')
73 |         init_defaults = tuple(field_defaults[f] for f in default_fields)
74 |     if default_kwds:
75 |         raise ValueError('Invalid keyword arguments: %s' % default_kwds)
76 |     # Create and fill-in the class template
77 |     numfields = len(field_names)
78 |     argtxt = ', '.join(field_names)
79 |     reprtxt = ', '.join('%s=%%r' % f for f in field_names)
80 |     dicttxt = ', '.join('%r: self.%s' % (f,f) for f in field_names)
81 |     tupletxt = repr(tuple('self.%s' % f for f in field_names)).replace("'",'')
82 |     inittxt = '; '.join('self.%s=%s' % (f,f) for f in field_names)
83 |     itertxt = '; '.join('yield self.%s' % f for f in field_names)
84 |     eqtxt = ' and '.join('self.%s==other.%s' % (f,f) for f in field_names)
85 |     template = dedent('''
86 |         class %(typename)s(object):
87 |             '%(typename)s(%(argtxt)s)'
88 | 
89 |             __slots__ = %(field_names)r
90 | 
91 |             def __init__(self, %(argtxt)s):
92 |                 %(inittxt)s
93 | 
94 |             def __len__(self):
95 |                 return %(numfields)d
96 | 
97 |             def __iter__(self):
98 |                 %(itertxt)s
99 | 
100 |             def __getitem__(self, index):
101 |                 return getattr(self, self.__slots__[index])
102 | 
103 |             def __setitem__(self, index, value):
104 |                 return setattr(self, self.__slots__[index], value)
105 | 
106 |             def todict(self):
107 |                 'Return a new dict which maps field names to their values'
108 |                 return {%(dicttxt)s}
109 | 
110 |             def __repr__(self):
111 |                 return '%(typename)s(%(reprtxt)s)' %% %(tupletxt)s
112 | 
113 |             def __eq__(self, other):
114 |                 return isinstance(other, self.__class__) and %(eqtxt)s
115 | 
116 |             def __ne__(self, other):
117 |                 return not self==other
118 | 
119 |             def __getstate__(self):
120 |                 return %(tupletxt)s
121 | 
122 |             def __setstate__(self, state):
123 |                 %(tupletxt)s = state
124 |     ''') % locals()
125 |     # Execute the template string in a temporary namespace
126 |     namespace = {}
127 |     try:
128 |         exec template in namespace
129 |         if verbose: print template
130 |     except SyntaxError, e:
131 |         raise SyntaxError(e.message + ':\n' + template)
132 |     cls = namespace[typename]
133 |     cls.__init__.im_func.func_defaults = init_defaults  # attach defaults post-hoc
134 |     # For pickling to work, the __module__ variable needs to be set to the frame
135 |     # where the named tuple is created.  Bypass this step in enviroments where
136 |     # sys._getframe is not defined (Jython for example).
137 |     if hasattr(sys, '_getframe') and sys.platform != 'cli':
138 |         cls.__module__ = sys._getframe(1).f_globals['__name__']
139 |     return cls
140 | 
141 | 
142 | if __name__ == '__main__':
143 |     import doctest
144 |     TestResults = recordtype('TestResults', 'failed, attempted')
145 |     print TestResults(*doctest.testmod())
146 |
--------------------------------------------------------------------------------
/smop/recipes.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/recipes.pyc
--------------------------------------------------------------------------------
/smop/resolve.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/resolve.pyc
--------------------------------------------------------------------------------
/smop/rewrite.py:
--------------------------------------------------------------------------------
1 | """
2 | SMOP compiler -- Simple Matlab/Octave to Python compiler
3 | Copyright 2011-2013 Victor Leikehman
4 | """
5 |
6 | import inspect
7 | import sys
8 | import node
9 | from node import extend
10 |
11 | import options,parse
12 |
13 | ZERO = node.number(0)  # canonical zero literal shared by rewrite rules
14 | 
15 | def rewrite(t):  # run rewrite passes over t until a pass records no changes
16 |     global modified; modified = []
17 |     while do_rewrite(t) or modified:
18 |         modified = []  # reset the per-pass change log
19 |
20 | def do_rewrite(t):
21 | for u in node.postorder(t):
22 | try:
23 | u._rewrite()
24 | except:
25 | assert 0
26 |
27 | def lineno():
28 |     """Returns the current line number in our program."""
29 |     return inspect.currentframe().f_back.f_lineno  # caller's line, for change logging
30 |
31 | @extend(node.node)
32 | def _rewrite(self):
33 |     pass  # default rule: most node kinds are left unchanged
34 |
35 | @extend(node.sum)
36 | @extend(node.min)
37 | @extend(node.max)
38 | def _rewrite(self):
39 |     """max(foo[:]) --> max(foo)
40 |     """
41 |     cls = self.__class__
42 |     if (self.args[0].__class__ is node.arrayref
43 |         and self.args[0].args[0].__class__ is node.expr
44 |         and self.args[0].args[0].op == "::"
45 |         and len(self.args[0].args[0].args) == 0):  # bare-colon subscript a(:)
46 | 
47 |         self.become(cls(self.args[0].func_expr))  # drop the redundant (:) indexing
48 |
49 | # """
50 | # @extend(node.false)
51 | # @extend(node.inf)
52 | # @extend(node.ones)
53 | # @extend(node.true)
54 | # @extend(node.zeros)
55 | # def _rewrite(self):
56 | # if self.__class__ == node.false:
57 | # v = node.logical(0)
58 | # elif self.__class__ == node.true:
59 | # v = node.logical(1)
60 | # elif self.__class__ == node.zeros:
61 | # v = node.number(0)
62 | # elif self.__class__ == node.ones:
63 | # v = node.number(1)
64 | # elif self.__class__ == node.inf:
65 | # v = node.number(0) # FIXME FIXME FIXme
66 | # s = self._shape()
67 | # if not s:
68 | # self.become(v)
69 | # modified.append(lineno())
70 | # return
71 |
72 | # if options.row_vector_ndim == 1 and s and s[0].__class__ == node.number:
73 | # del s[0]
74 |
75 | # self.become(node.RESHAPE(node.matrix(v),
76 | # node.matrix(*s),
77 | # node.matrix(v)))
78 | # modified.append(lineno())
79 |
80 |
81 |
82 | # # elemental monadic
83 | # @extend(node.isnan)
84 | # @extend(node.sign)
85 | # def _rewrite(self):
86 | # cls = getattr(node,self.__class__.__name__.upper(),None)
87 | # assert issubclass(cls,node.builtins)
88 | # self.become(cls(self.args[0]))
89 | # modified.append(lineno())
90 |
91 |
92 | # @extend(node.rand)
93 | # def _rewrite(self):
94 | # # r = rand(n)
95 | # # r = rand(m,n)
96 | # # r = rand([m,n])
97 | # # r = rand(m,n,p,...)
98 | # # r = rand([m,n,p,...])
99 | # # r = rand
100 | # # r = rand(size(A))
101 | # # r = rand(..., 'double')
102 | # # r = rand(..., 'single')
103 |
104 | # cls = getattr(node,self.__class__.__name__.upper(),None)
105 | # assert issubclass(cls,node.builtins)
106 | # self.become(cls(*self.args))
107 | # modified.append(lineno())
108 |
109 | # @extend(node.length)
110 | # def _rewrite(self):
111 | # if not self.args[0]._shape():
112 | # self.become(ONE)
113 | # modified.append(lineno())
114 | # else:
115 | # self.become(node.MAXVAL(node.SHAPE(self.args[0])))
116 | # modified.append(lineno())
117 |
118 | # @extend(node.ceil)
119 | # def _rewrite(self):
120 | # self.become(node.CEILING(self.args[0]))
121 | # modified.append(lineno())
122 |
123 | # @extend(node.mod)
124 | # def _rewrite(self):
125 | # self.become(node.MODULO(self.args[0],self.args[1]))
126 | # modified.append(lineno())
127 |
128 | # @extend(node.func_decl)
129 | # def _rewrite(self):
130 | # for s in self.args:
131 | # if s.__class__ == node.ident:
132 | # s.__class__ = node.param
133 | # modified.append(lineno())
134 |
135 | # # @extend(node.stmt_list)
136 | # # @extend(node.global_list)
137 | # # @extend(node.concat_list)
138 | # # @extend(node.expr_list)
139 | # # def _rewrite(self):
140 | # # for expr in self:
141 | # # expr._rewrite()
142 |
143 | # class LET(node.let): # final fortran version of assignment
144 | # def _rewrite(self):
145 | # pass
146 |
147 | # @extend(node.let)
148 | # def _rewrite(self):
149 | # global debug_variables
150 |
151 | # if self.ret.__class__ == node.ident:
152 | # if self.ret.name in debug_variables:
153 | # pdb.set_trace()
154 | # try:
155 | # self.ret._t = self.args._type()
156 | # self.ret._r = self.args._rank()
157 | # self.ret._s = node.expr_list() if self.ret._r == 0 else self.args._shape()
158 | # assert isinstance(self.ret._s,node.expr_list)
159 | # #assert all([not d._shape() for d in self.ret._s])
160 | # for d in self.ret._s:
161 | # assert not d._shape()
162 |
163 | # ndims = len(self.ret._s)
164 | # if ndims:
165 | # #dim_list = node.expr_list([node.arrayref(self.ret, node.number(i+1)) for i in range(len(self.ret._s))]),
166 | # new_self = node.stmt_list([node.allocate_stmt(self.ret, self.ret._s),
167 | # LET(self.ret,self.args)])
168 | # logging.debug("%s ==> %s" % (self,new_self))
169 | # self.become(new_self)
170 | # modified.append(lineno())
171 | # except:
172 | # logging.exception("")
173 | # pass
174 |
175 |
176 | # # @extend(node.func_decl)
177 | # # def _rewrite(self):
178 | # # #We dont know a priori type and rank
179 | # # #of function arguments. We do know that
180 | # # #to get their size we use SIZE(args[i])
181 | # #
182 | # # for a in self.args:
183 | # # if a.__class__ == node.ident:
184 | # # a.__class__ = node.param
185 |
186 | # @extend(node.call_stmt)
187 | # def _rewrite(self):
188 | # if (self.func_expr.__class__ == node.ident and
189 | # self.func_expr.name == "size" and
190 | # self.func_expr.defs == set()):
191 | # self.become(node.stmt_list([node.let(self.ret[0],node.SIZE(self.args,ONE)),
192 | # node.let(self.ret[1],node.SIZE(self.args,TWO))]))
193 | # modified.append(lineno())
194 | # elif (self.func_expr.__class__ == node.ident and
195 | # self.func_expr.name == "find" and
196 | # self.func_expr.defs == set()):
197 | # self.func_expr.name = "FIND"
198 | # self.ret[0]._s = node.expr_list()
199 | # self.ret[1]._s = node.expr_list()
200 | # modified.append(lineno())
201 |
202 | # @extend(node.arrayref)
203 | # def _rewrite(self):
204 | # pass
205 |
206 |
207 | # @extend(node.funcall)
208 | # def _rewrite(self):
209 | # if self.func_expr.__class__ == node.ident:
210 | # # Convert funcall nodes to array references.
211 | # if self.func_expr.defs:
212 | # self.__class__ = node.arrayref
213 | # modified.append(lineno())
214 | # elif self.func_expr.defs == set():
215 | # # Convert recognized builtin functions to builtin nodes.
216 | # cls = getattr(node,self.func_expr.name,None)
217 | # if cls and issubclass(cls,node.builtins) and self.__class__ != cls:
218 | # logging.debug("%s ==> %s",self.__class__, cls)
219 | # self.__class__ = cls
220 | # modified.append(lineno())
221 | # else:
222 | # pass
223 | # #raise Exception("'%s' used but not defined" %
224 | # # self.func_expr.name)
225 |
226 |
227 |
228 |
229 | # @extend(node.matrix)
230 | # def _rewrite(self):
231 | # pass
232 |
233 |
234 |
235 |
--------------------------------------------------------------------------------
/smop/solver.m:
--------------------------------------------------------------------------------
1 | function mv = solver(ai,af,w)
2 | rand(1,2,3);  % seed the local rand() (defined below) with (s1,s2,s3)=(1,2,3)
3 | %
4 | % Copyright 2004 The MathWorks, Inc.
5 | 
6 | nBlocks = max(ai(:));
7 | [m,n] = size(ai);
8 | 
9 | % Make increment tables
10 | % N=1, E=2, S=3, W=4
11 | I = [0 1 0 -1];
12 | J = [1 0 -1 0];
13 | 
14 | a = ai;
15 | mv = [];
16 | 
17 | while ~isequal(af,a)
18 | 
19 |     % Pick a random block
20 |     bid = ceil(rand()*nBlocks);
21 |     [i,j] = find(a==bid);
22 | 
23 |     % Move it in a random direction
24 |     r = ceil(rand()*4);
25 |     ni = i + I(r);
26 |     nj = j + J(r);
27 | 
28 |     % Is it a legal move? Check edges
29 |     if (ni<1) || (ni>m) || (nj<1) || (nj>n)
30 |         continue
31 |     end
32 | 
33 |     % Is it a legal move? Check for collision
34 |     if a(ni,nj)>0
35 |         continue
36 |     end
37 | 
38 |     % Check distance
39 |     % Get the target location
40 |     [ti,tj] = find(af==bid);
41 |     d = (ti-i)^2 + (tj-j)^2;
42 |     dn = (ti-ni)^2 + (tj-nj)^2;
43 |     % Have we moved closer to the target location?
44 |     % FIX: this condition read "if (d0.05)" in the dump -- the comparison was
45 |     % garbled.  Restored from the generated solver.py: reject a move that
46 |     % increases the distance, except with 5%% probability (random escape).
47 |     if (d<dn) && (rand()>0.05)
48 |         continue
49 |     end
50 | 
51 |     % Move the block
52 |     a(ni,nj) = bid;
53 |     a(i,j) = 0;
54 | 
55 |     % Record the move
56 |     mv(end+1,[1 2]) = [bid r];
57 | end
55 | end
56 |
57 | function r = rand(varargin)  % deterministic rand replacement backed by r8_random
58 | global s1 s2 s3  % generator state shared across calls
59 | if nargin != 0  % NOTE(review): "!=" is Octave-only; MATLAB proper needs "~="
60 |     r=0;  % rand(a,b,c) seeds the generator and returns 0
61 |     s1=varargin{1};
62 |     s2=varargin{2};
63 |     s3=varargin{3};
64 | else  % rand() draws the next uniform deviate in [0,1)
65 |     [r,s1,s2,s3] = r8_random(s1,s2,s3);
66 | end
67 | end
68 |
69 |
--------------------------------------------------------------------------------
/smop/solver.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import pstats,cProfile
3 | import numpy,time
4 | import solver as a
5 | from core import *
6 |
7 | # Autogenerated with SMOP version 0.1.1
8 | # main.py solver.m
9 | def solver(ai=None,af=None,w=None,*args,**kwargs):  # SMOP translation of solver.m
10 |     nargout = kwargs["nargout"] if kwargs else None
11 |     varargin = cellarray(args)
12 |     nargin = 3-[ai,af,w].count(None)+len(args)  # emulate MATLAB nargin
13 | 
14 |     rand(1,2,3)  # seed the module-level rand() below with (1,2,3)
15 |     nBlocks=max(ai[:])
16 |     m,n=size(ai,nargout=2)
17 |     I=matlabarray(cat(0,1,0,- 1))  # row offsets for directions N,E,S,W
18 |     J=matlabarray(cat(1,0,- 1,0))  # col offsets for directions N,E,S,W
19 |     a=copy(ai)
20 |     mv=matlabarray([])
21 |     while not isequal(af,a):
22 | 
23 |         bid=ceil(rand() * nBlocks)  # pick a random block
24 |         i,j=find(a == bid,nargout=2)
25 |         r=ceil(rand() * 4)  # pick a random direction
26 |         ni=i + I[r]
27 |         nj=j + J[r]
28 |         if (ni < 1) or (ni > m) or (nj < 1) or (nj > n):  # off the board
29 |             continue
30 |         if a[ni,nj] > 0:  # destination occupied
31 |             continue
32 |         ti,tj=find(af == bid,nargout=2)  # target cell for this block
33 |         d=(ti - i) ** 2 + (tj - j) ** 2
34 |         dn=(ti - ni) ** 2 + (tj - nj) ** 2
35 |         if (d < dn) and (rand() > 0.05):  # mostly reject distance-increasing moves
36 |             continue
37 |         a[ni,nj]=bid
38 |         a[i,j]=0
39 |         mv[end() + 1,cat(1,2)]=cat(bid,r)  # append (block, direction) to the log
40 | 
41 |     return mv
42 | def rand(*args,**kwargs):  # deterministic stand-in for MATLAB rand, via r8_random
43 |     nargout = kwargs["nargout"] if kwargs else None
44 |     varargin = cellarray(args)
45 |     nargin = 0-[].count(None)+len(args)
46 | 
47 |     global s1,s2,s3  # generator state shared across calls
48 |     if nargin != 0:  # rand(a,b,c): seed the generator, return 0
49 |         r=0
50 |         s1=varargin[1]
51 |         s2=varargin[2]
52 |         s3=varargin[3]
53 |     else:  # rand(): draw the next uniform deviate
54 |         r,s1,s2,s3=r8_random(s1,s2,s3,nargout=4)
55 |     return r
56 |
--------------------------------------------------------------------------------
/smop/solver.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/solver.pyc
--------------------------------------------------------------------------------
/smop/solver.pyx:
--------------------------------------------------------------------------------
1 | # cython: profile=True
2 | from __future__ import division
3 | import numpy as np
4 | cimport numpy as np
5 | from runtime import *
6 |
7 | cdef extern from "math.h":
8 | double ceil(double a)
9 |
10 | cdef double s1 = 1.0
11 | cdef double s2 = 2.0
12 | cdef double s3 = 3.0
13 |
14 | cdef double r8_random():  # Wichmann-Hill AS 183 combined generator
15 |     global s1, s2, s3  # module-level state seeded at the top of the file
16 |     s1 = 171 * s1 % 30269
17 |     s2 = 172 * s2 % 30307
18 |     s3 = 170 * s3 % 30323
19 |     return (s1 / 30269.0 + s2 / 30307.0 + s3 / 30323.0) % 1.0
20 |
21 |
22 | #def find(np.ndarray a):
23 | # cdef int i,j
24 | # cdef int m = a.shape[0]
25 | # cdef int n = a.shape[1]
26 | #
27 | # for i in range(m):
28 | # for j in range(n):
29 | # if a[i,j]:
30 | # return i+1,j+1
31 | # else:
32 | # raise ValueError
33 |
34 | cdef tuple find(np.ndarray[char,ndim=2,cast=True] a): # 38
35 |     cdef int i,j,k
36 |     cdef char* p = a.data  # raw scan over the boolean mask's buffer
37 |     for k in range(a.size):
38 |         if p[k]:
39 |             i,j = np.unravel_index(k, (a.shape[0],a.shape[1]), order="F")  # Fortran order to mimic MATLAB find
40 |             return i+1,j+1  # 1-based indices, like MATLAB
41 |     raise ValueError  # no nonzero element found
42 |
43 | #cdef tuple find(np.ndarray a): # 50
44 | # cdef int k=0
45 | # s = (a.shape[0],a.shape[1])
46 | # for x in np.nditer(a):
47 | # if x:
48 | # i,j = np.unravel_index(k,s,order="F")
49 | # return i+1,j+1
50 | # k = k+1
51 | # raise ValueError
52 |
53 | def solver_(np.ndarray ai,
54 |             np.ndarray af,
55 |             int w,
56 |             int nargout=1):  # Cython port of solver.m; w and nargout unused here
57 |     cdef int nBlocks,m,n,i,j,r,ni,nj,ti,tj,d,dn
58 |     cdef np.ndarray mv
59 |     cdef int bid
60 |     #rand_(1,2,3)
61 |     nBlocks=max_(ai[:])
62 |     m,n=size_(ai,nargout=2)
63 |     cdef np.ndarray I = matlabarray([0,1,0,- 1])  # row steps for N,E,S,W
64 |     cdef np.ndarray J = matlabarray([1,0,- 1,0])  # col steps for N,E,S,W
65 |     cdef np.ndarray a = copy_(ai)
66 |     mv=matlabarray([])
67 |     while not isequal_(af,a):
68 | 
69 |         bid = int(ceil(r8_random() * nBlocks))  # random block
70 |         i,j=find(a == bid)
71 |         r=int(ceil(r8_random() * 4))  # random direction
72 |         ni=i + I[r]
73 |         nj=j + J[r]
74 |         if (ni < 1) or (ni > m) or (nj < 1) or (nj > n):  # off the board
75 |             continue
76 |         if a[ni,nj] > 0:  # destination occupied
77 |             continue
78 |         ti,tj=find(af == bid)  # target cell for this block
79 |         d=(ti - i) ** 2 + (tj - j) ** 2
80 |         dn=(ti - ni) ** 2 + (tj - nj) ** 2
81 |         if (d < dn) and (r8_random() > 0.05):  # mostly reject distance-increasing moves
82 |             continue
83 |         a[ni,nj]=bid
84 |         a[i,j]=0
85 |         mv[mv.shape[0] + 1,[1,2]]=[bid,r]  # append (block, direction)
86 | 
87 |     return mv
88 |
--------------------------------------------------------------------------------
/smop/sparsearray.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import sys
3 |
4 | class sparsearray(dict):
 5 |     def __init__(self,input_array=[[]]):  # map 1-based index tuples -> values
 6 |         #import pdb;pdb.set_trace()
 7 |         dict.__init__(self)
 8 |         a = np.atleast_2d(input_array)  # default [[]] is read-only here, never mutated
 9 |         self.dtype = a.dtype
10 |         self.ndim = a.ndim
11 |         self._shape = None  # computed lazily by the shape property
12 |         if not a.size:
13 |             return  # empty input: no entries to store
14 |         it = np.nditer(a, flags=['multi_index'])
15 |         while not it.finished:
16 |             index = tuple([i+1 for i in it.multi_index])  # store 1-based keys
17 |             self.setdefault(index,it[0].item())
18 |             it.iternext()
19 |
20 |     @property
21 |     def shape(self):  # lazily recomputed: max key per dim (1-based keys == sizes)
22 |         #import pdb;pdb.set_trace()
23 |         if self._shape is None:
24 |             s = [0] * self.ndim
25 |             for key in self.keys():
26 |                 for i,k in enumerate(key):
27 |                     s[i] = max(k,s[i])
28 |             self._shape = tuple(s)
29 |         return self._shape
30 | 
31 |     def todense(self):  # materialize as a regular numpy array; missing entries -> 0
32 |         a = np.zeros(self.shape,dtype=self.dtype)
33 |         for key,value in self.iteritems():
34 |             key = tuple([i-1 for i in key])  # back to 0-based for numpy
35 |             a.__setitem__(key,value)
36 |         return a
37 |
38 | def __str__(self):
39 | return str(self.todense())
40 |
41 | def __repr__(self):
42 | return repr(self.todense())
43 |
44 | def copy(self):
45 | #return copy.copy(self)
46 | return self.todense()
47 |
48 | def __setitem__(self,index,value):
49 | if np.isscalar(value):
50 | for key in self.iterkeys(index):
51 | dict.__setitem__(self,key,value)
52 | self._shape = None
53 | else:
54 | raise NotImplementedError
55 |
56 | def __getslice__(self,i,j):
57 | if j == sys.maxint:
58 | j = None
59 | return self.__getitem__(slice(i,j,None))
60 |
61 | def __getitem__(self,index):
62 | try:
63 | #a = [dict.__getitem__(self,key) for key in self.iterkeys(index)]
64 | a = [self.get(key,0) for key in self.iterkeys(index)]
65 | if len(a) == 1:
66 | return a[0]
67 | except ValueError:
68 | raise IndexError # out of bound rhs indexing
69 | #return a
70 | #return sparsearray([a])
71 | return np.array(a)
72 |
73 | def iterkeys(self,index):
74 | #import pdb; pdb.set_trace()
75 | if not isinstance(index,tuple) and self.shape[0] == 1:
76 | index = (1,index)
77 | if isinstance(index, int):
78 | key = np.unravel_index(index-1, self.shape, order='F')
79 | yield tuple(k+1 for k in key)
80 | elif isinstance(index,slice):
81 | index = range((index.start or 1)-1,
82 | index.stop or np.prod(self.shape),
83 | index.step or 1)
84 | for key in np.transpose(np.unravel_index(index, self.shape, order='F')): # 0-based
85 | yield tuple(k+1 for k in key)
86 | elif isinstance(index,(list,np.ndarray)):
87 | index = np.asarray(index)-1
88 | for key in np.transpose(np.unravel_index(index, self.shape, order='F')):
89 | yield tuple(k+1 for k in key)
90 | else:
91 | assert isinstance(index,tuple),index.__class__
92 | indices = [] # 1-based
93 | for i,ix in enumerate(index):
94 | if isinstance(ix,slice):
95 | indices.append(np.arange((ix.start or 1),
96 | (ix.stop or self.shape[i]) + 1,
97 | ix.step or 1,
98 | dtype=int))
99 | else:
100 | indices.append(np.asarray(ix))
101 | assert len(index) == 2
102 | indices[0].shape = (-1,1)
103 | for key in np.broadcast(*indices):
104 | yield tuple(map(int,key))
105 |
106 |
--------------------------------------------------------------------------------
/smop/test_core.py:
--------------------------------------------------------------------------------
1 | # SMOP compiler runtime support library
2 | # Copyright 2014 Victor Leikehman
3 |
4 | # MIT license
5 | import numpy as np
6 | import unittest
7 | from core import *
8 |
class Getitem(unittest.TestCase):
    """RHS indexing behaviour of matlabarray (1-based, column-major)."""
    def setUp(self):
        self.a = matlabarray(arange(1,20).reshape(4,5,order="F"))
    # 2-dim
    def test01(self):
        self.assertEqual(self.a[1,1],1)

    def test02(self):
        self.assertEqual(self.a[4,5],20)

    def test03(self):
        # A full slice returns an equal array.
        self.assertTrue(isequal(self.a[:,:],self.a))

    # 1-dim
    def test04(self):
        # a[:] linearizes column-major into a column vector.
        a = matlabarray(arange(1,20).reshape(4,5,order="F"))
        aa = a[:]
        bb = matlabarray(arange(1,20).reshape(-1,1,order="F"))
        self.assertTrue(isequal(aa,bb))

    def test05(self):
        #import pdb; pdb.set_trace()
        # Repeating row index 1 four times replicates the row vector.
        z = [[11,22,33,44],
             [11,22,33,44],
             [11,22,33,44],
             [11,22,33,44]]
        a = matlabarray([11,22,33,44], dtype=int)
        self.assertTrue(isequal(a[ [1,1,1,1] , 1:4], matlabarray(z)))
        self.assertTrue(isequal(a[ [1,1,1,1], : ], matlabarray(z)))
        #self.assertTrue(isequal(a[ [[1,1,1,1]], 1:4], matlabarray([z])))

    def test06(self):
        # Writing past the end of a scalar grows it into a row vector.
        a=copy(0)
        a[6]=666
        self.assertTrue(isequal(a, [[0.,0.,0.,0.,0.,666.]]))
44 |
class Expand(unittest.TestCase):
    """
    Expand on index error
    """
    def setUp(self):
        self.a = matlabarray(zeros(1,4))

    def test01(self):
        # Out-of-range 2-d write grows the row vector.
        self.a[1,5]=1
        self.assertTrue(isequal(self.a,
                                matlabarray([[0,0,0,0,1]])))

    def test02(self):
        #with self.assertRaises(IndexError):
        a = matlabarray(zeros(1,4))
        a[5]=1 # single index
        self.assertTrue(isequal(a,
                                matlabarray([[0,0,0,0,1]])))

    #@unittest.skip("")
    def test03(self):
        # No assertion: documents the expected expansion of a strided write.
        a=zeros(1,4)
        a[1:10:4]=1
        "[[ 1. 0. 0. 0. 1. 0. 0. 0. 1. 0.]]"

    #@unittest.skip("")
    def test04(self):
        a=zeros(1,4)
        with self.assertRaises(IndexError):
            # NOTE(review): everything after the raising statement is
            # unreachable -- the with-block exits at the IndexError.
            a[5,5]=1
            b = matlabarray(
                [[0,0,0,0,0],
                 [0,0,0,0,0],
                 [0,0,0,0,0],
                 [0,0,0,0,0],
                 [0,0,0,0,1]])
            self.assertTrue(isequal(a,b))
82 |
class Strread(unittest.TestCase):
    """Tests for strread, the MATLAB-style numeric string reader."""

    def test01(self):
        # Default nargout: all parsed values come back in one row vector.
        result = strread("0.11 0.22 0.33")
        expected = [[0.11, 0.22, 0.33]]
        self.assertTrue(isequal(result, expected))

    def test02(self):
        # nargout=3 unpacks the parsed values into three scalars.
        first, second, third = strread("0.11 0.22 0.33", nargout=3)
        self.assertEqual(first, 0.11)
        self.assertEqual(second, 0.22)
        self.assertEqual(third, 0.33)
93 |
class Core(unittest.TestCase):
    """Smoke tests for core's MATLAB-flavoured builtins."""
    def setUp(self):
        self.a = arange(1,10).reshape(2,5,order="F")

    def test01(self):
        # abs of a negated positive array is the identity.
        b = abs(-self.a)
        self.assertTrue(isequal(self.a,b))

    def test02(self):
        # ceil on an integer array is the identity.
        b = ceil(self.a)
        self.assertTrue(isequal(self.a,b))

    def test03(self):
        b = false(2,3)
        self.assertTrue(isequal(size(b), [[2,3]]))

    def test_zeros(self):
        # zeros() is a scalar; zeros(n) means zeros(n,n).
        self.assertEqual(zeros(), 0.0)
        self.assertTrue(isequal(zeros(2), zeros(2,2)))
        self.assertTrue(isequal(zeros(2,2), zeros(2,2)))
114 |
115 |
116 | #class Copy(unittest.TestCase):
117 | # def setUp(self):
118 | # self.a = zeros(1,4)
119 | #
120 | # def test01(self):
121 | # b=self.a.copy()
122 | # print self.a
123 | # print b
124 | # self.assertTrue(isequal(self.a,b))
125 | # b[1,1]=123
126 | # self.assertTrue(not isequal(self.a,b))
127 | # #c=b.copy()
128 | # #c[1,:]=1
129 | # #self.assertTrue(not isequal(b,c))
130 |
if __name__ == "__main__":
    unittest.main()  # discover and run this module's TestCase classes
133 |
--------------------------------------------------------------------------------
/smop/test_lexer.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import lexer
3 |
class TestLexer(unittest.TestCase):
    """Token-stream tests for the MATLAB/Octave lexer."""
    def setUp(self):
        self.lexer = lexer.new()

    def test010(self):
        """Reserved words are not reserved as field names"""
        self.lexer.input("for foo.for")
        t = [tok.type for tok in self.lexer]
        u = ["FOR","IDENT","FIELD"]
        self.assertEqual(t,u)

    def test020(self):
        """Fields are recognized beyond continuation lines"""
        self.lexer.input("foo ... hello \n\t .\t ... world\n \t ... \n bar")
        t = [tok.type for tok in self.lexer]
        u = ["IDENT","FIELD"]
        self.assertEqual(t,u)

    ### commands are now recognized in the parser
    # def test_t03(self):
    #     """Name following another name is a command.  Names might be
    #     separated by whitespace, but must be on the same line .  Once
    #     we detect a command, every name (including keywords) is
    #     reported as a string.
    #     """
    #     self.lexer.input("foo bar for")
    #     t = [tok.type for tok in self.lexer]
    #     u = ["COMMAND","STRING","STRING"]
    #     self.assertEqual(t,u)

    # def test_t03a(self):
    #     self.lexer.input("dir '/windows'")
    #     t = [tok.type for tok in self.lexer]
    #     u = ["COMMAND","STRING"]
    #     self.assertEqual(t,u)

    def test030(self):
        self.lexer.input("for bar =")
        t = [tok.type for tok in self.lexer]
        u = ["FOR", "IDENT", "EQ"]
        self.assertEqual(t,u)

    def test040(self):
        "Ignore anything from ... to end of line"
        self.lexer.input("foo ... hello world \n bar")
        # line continuation
        t = [tok.type for tok in self.lexer]
        u = ["IDENT", "IDENT"]
        self.assertEqual(t,u)

    def test042(self):
        "backslash-newline continues a line"
        s = "foo \\\n bar ... hello\n bzz"
        #print 43,`s`
        #print '#', s.count('\n')
        self.lexer.input(s)
        # line continuation
        t = [tok.type for tok in self.lexer]
        u = ["IDENT", "IDENT", "IDENT"]
        self.assertEqual(t,u)

    def test044(self):
        "backslash-newline continues a line"
        self.lexer.input(r'puts ("hel\"zvvzcvnzvzlozz")')
        # line continuation
        t = [tok.type for tok in self.lexer]
        u = ["IDENT", "LPAREN", "STRING","RPAREN"]
        self.assertEqual(t,u)

    def test050(self):
        # Bare words form a command; comma separates statements (SEMI).
        self.lexer.input("clear all, figure on")
        t = [tok.type for tok in self.lexer]
        u = ["IDENT","IDENT","SEMI","IDENT","IDENT"]
        self.assertEqual(t,u)

    def test060(self):
        self.lexer.input("dir '.', for i=foo")
        t = [tok.type for tok in self.lexer]
        u = ["IDENT","STRING","SEMI","FOR","IDENT","EQ","IDENT"]
        self.assertEqual(t,u)

    def test070(self):
        """Line continuation is allowed between command arguments,
        but not between the command and the first argument."""
        self.lexer.input("dir 'hello' ... adfadfsda \n fooobar")
        t = [tok.type for tok in self.lexer]
        u = ["IDENT","STRING","IDENT"]
        self.assertEqual(t,u)

    def test080(self):
        """Terms may start and end with a dot, when a floating
        point number starts or ends with a decimal point"""
        self.lexer.input("[1. .2]")
        t = [tok.type for tok in self.lexer]
        u = ["LBRACKET","NUMBER","COMMA","NUMBER","RBRACKET"]
        self.assertEqual(t,u)

    def test090(self):
        """Ops that start with a dot (./ etc) should not
        be understood as terms"""
        self.lexer.input("[1. ./ .2]")
        t = [tok.type for tok in self.lexer]
        u = ["LBRACKET","NUMBER","DOTDIV","NUMBER","RBRACKET"]
        self.assertEqual(t,u)

    def test100(self):
        "Complex constants, such as 1i and 1j"
        self.lexer.input("1i+1j")
        t = [tok.type for tok in self.lexer]
        u = ["NUMBER","PLUS","NUMBER"]
        self.assertEqual(t,u)

    def test110(self):
        "Quotes and backslashes in matlab strings"
        self.lexer.input(r"'hello''world'")
        tok = self.lexer.next()
        self.assertEqual(tok.value,r"hello'world")

    def test112(self):
        "Quotes and backslashes in octave strings"
        self.lexer.input(r'"hello\"world"')
        tok = self.lexer.next()
        self.assertEqual(tok.value,r'hello"world')

    def test114(self):
        "Quotes and backslashes in octave strings"
        self.lexer.input('''"hello\
world"''')
        tok = self.lexer.next()
        self.assertEqual(tok.value,r'helloworld')

    def test116(self):
        "Quotes and backslashes in octave strings"
        self.lexer.input(r'"hello\n"')
        tok = self.lexer.next()
        self.assertEqual(tok.value,'hello\n')

    def test118(self):
        "Quotes and backslashes in octave strings"
        self.lexer.input(r'"hello\\world"')
        tok = self.lexer.next()
        self.assertEqual(tok.value,r'hello\world')

    def test119(self):
        "Quotes and backslashes in octave strings"
        self.lexer.input(r'"hello""world"')
        tok = self.lexer.next()
        self.assertEqual(tok.value,r'hello"world')

    def test120(self):
        "d5ef: cell arrays nested in regular arrays"
        self.lexer.input(r"[foo{i} bar]")
        t = [tok.type for tok in self.lexer]
        u = ["LBRACKET","IDENT","LBRACE","IDENT","RBRACE","COMMA","IDENT","RBRACKET"]
        self.assertEqual(t,u)

    def test130(self):
        "d5ef: Another one"
        self.lexer.input("[y / (x - x) ]")
        t = [tok.type for tok in self.lexer]
        u = ["LBRACKET","IDENT","DIV","LPAREN","IDENT","MINUS","IDENT","RPAREN","RBRACKET"]
        self.assertEqual(t,u)

    def test140(self):
        "quote immediatly following a keyword is always a string"
        self.lexer.input("case'abc'")
        t = [tok.type for tok in self.lexer]
        u = ["CASE","STRING"]
        self.assertEqual(t,u)

    def test150(self):
        # Backslash continuations must not surface as BACKSLASH or SEMI tokens.
        s = "g = (x < 3/8) .* (7/8 * x)\\\n + (x >= 3/8 & x < 3/4) .* (29/24 * x - 1/8)\\\n + (x >= 3/4) .* (7/8 * x + 1/8)"
        self.lexer.input(s)
        toklist = [tok.type for tok in self.lexer]
        self.assertEqual(True, "BACKSLASH" not in toklist)
        self.assertEqual(True, "SEMI" not in toklist)

    def test160(self):
        "comment must leave a newline"
        s = """if (ld > 127) # convert 16 to 8 bit
          if (ld < 16384)
        """
        self.lexer.input(s)
        t = [tok.type for tok in self.lexer]
        # Token 6 is the statement separator produced for the comment's newline.
        self.assertEqual("SEMI",t[6])
189 |
if __name__ == "__main__":
    unittest.main()  # discover and run this module's TestCase classes
192 |
--------------------------------------------------------------------------------
/smop/test_matlabarray.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from core import *
3 |
class test_matlabarray(unittest.TestCase):
    """Expanding matlabarray"""
    def test010(self):
        """Two-dimensional assignment to []"""
        a = matlabarray()
        a[a.shape[0]+1,[1,2,3]] = [123,456,789]
        a[a.shape[0]+1,[1,2,3]] = [123,456,789]
        a[a.shape[0]+1,[1,2,3]] = [123,456,789]
        self.assertTrue(isequal (a, [[123,456,789],
                                     [123,456,789],
                                     [123,456,789]]))
    def test020(self):
        """Two-dimensional assignment to []
        Expand, use list [1,2,3] for indexing"""
        a = matlabarray()
        a[a.shape[0]+1,[1,2,3]] = 123
        a[a.shape[0]+1,[1,2,3]] = 123
        a[a.shape[0]+1,[1,2,3]] = 123
        self.assertTrue(isequal (a, [[123,123,123],
                                     [123,123,123],
                                     [123,123,123]]))

    def test030(self):
        """Two-dimensional assignment to []
        Expand, use slice 1:3 for indexing"""
        a = matlabarray()
        #import pdb; pdb.set_trace()
        a[a.shape[0]+1,1:3] = 123
        a[a.shape[0]+1,1:3] = 123
        a[a.shape[0]+1,1:3] = 123
        #print a.shape
        self.assertTrue(isequal (a, [[123,123,123],
                                     [123,123,123],
                                     [123,123,123]]))
    #@unittest.skip("FIXME")
    def test040(self):
        a = matlabarray()
        with self.assertRaises(IndexError):
            # NOTE(review): the first write raises, so the remaining
            # statements in this with-block never execute.
            a[a.shape[0]+1,:] = 123
            a[a.shape[0]+1,:] = 123
            a[a.shape[0]+1,:] = 123
            self.assertTrue(isequal (a, [[123],
                                         [123],
                                         [123]]))
    @unittest.skip("wonders of matlab")
    def test050(self):
        """
        Compare to test060

        octave> a=[]
        a = []
        octave> a(:,:)=99
        a = 99
        octave> a
        a = 99
        octave> size(a)
        ans =

           1   1
        """
        a = matlabarray()
        a[:,:] = 99
        self.assertTrue(isequal (a.item(0), 99))

    def test060(self):
        """One-dimensional assignment to empty array

        octave> a=[]
        a = []
        octave> a(:)=99
        a = []
        octave> a
        a = []
        """
        a = matlabarray()
        with self.assertRaises(IndexError):
            a[:] = 99
        self.assertTrue(isempty (a))

    #@unittest.skip("wonders of matlab")
    def test062(self):
        """One-dimensional assignment to empty array

        octave> a=[]
        a = []
        octave> a(:)=[1 2 3]
        error: A(I) = X: X must have the same size as I
        """
        a = matlabarray()
        with self.assertRaises(Exception):
            a[:] = [1,2,3]

    #@unittest.skip("wonders of matlab")
    def test064(self):
        """One-dimensional assignment to empty array

        octave> a=[]
        a = []
        octave> a(1:3)=[1 2 3]
        1 2 3
        """
        a = matlabarray()
        a[1:3] = [1,2,3]
        self.assertTrue(isequal (a, [1,2,3]))

    def test070(self):
        """
        wonders of matlab

        octave> c=[]
        c = []
        octave> c(1:end)=9
        c = []
        """
        a = matlabarray()
        a[1:a.shape[0]] = 9
        self.assertTrue(isempty (a))

    @unittest.skip("wonders of matlab")
    def test080(self):
        """
        octave> a=[]
        a = []
        octave> a(1:end,5) = 5
        a = [](0x5) % notice 0x5
        """

    def test084(self):
        """
        octave> a=[]
        a = []
        octave> a(:,5) = 5
        a =

           0 0 0 0 5
        """

    def test090(self):
        # Single-index write past the end extends the row vector.
        a = matlabarray([[11,22,33]])
        a[4] = 44
        self.assertTrue(isequal (a,[[11,22,33,44]]))

    def test092(self):
        # Slice write past the end extends the row vector.
        a = matlabarray([[11,22,33,44]])
        a[5:7] = [55,66,77]
        self.assertTrue(isequal (a,[[11,22,33,44,55,66,77]]))

    def test094(self):
        # List-index write past the end extends the row vector.
        a = matlabarray([[11,22,33,44,55,66,77]])
        a[[8,9]] = [88,99]
        self.assertTrue(isequal (a,[[11,22,33,44,55,66,77,88,99]]))

    def test100(self):
        # Writing a new column grows the array horizontally.
        a = matlabarray([[1,3],
                         [2,4]])
        #a[: , a.shape[1]+1] = [5,6]
        a[: , 3] = [5,6]
        self.assertTrue(isequal (a,[[1,3,5],
                                    [2,4,6]]))

    def test110(self):
        # In-range block write via two slices.
        a = zeros (4,4,dtype=int)
        a[2:3,2:3] = 1
        #print a
        self.assertTrue(isequal (a,[[0,0,0,0],
                                    [0,1,1,0],
                                    [0,1,1,0],
                                    [0,0,0,0]]))
if __name__ == "__main__":
    unittest.main()  # discover and run this module's TestCase classes
# vim:et:sw=4:si:
175 |
--------------------------------------------------------------------------------
/smop/test_parse.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import parse
3 |
class TestParse(unittest.TestCase):
    """AST-level tests for the MATLAB parser."""
    def test_p03(self):
        """Expected failure"""
        # Trailing separators inside a matrix literal must still parse.
        s = """[1 ; 1; 1 ; ];"""
        t = parse.parse(s)
        self.assert_(t)

    def test_p04(self):
        """Dot has higher precedence than other operations"""
        s = "a+b.c.d;"
        t = parse.parse(s)
        # Expected tree: '+' applied to ident a and the nested field
        # access ((b).c).d -- i.e. the dots bind before the plus.
        u = [parse.expr_stmt(id=1, expr=[('+', parse.ident(name='a',
            lineno=1, lexpos=0),
            parse.field(expr=parse.field(expr=parse.ident(name='b',
            lineno=1, lexpos=2), ident=parse.ident(name='.c', lineno=1,
            lexpos=3)), ident=parse.ident(name='.d', lineno=1, lexpos=5)))])]
        self.assertEqual(t,u)

    # def test_p05(self):
    #     """Iterate over LHS nodes (TBD)"""
    #     s = "[foo(A.x(B.y)).bar(C.z).bzz,hello.world] =1;"
    #     t = parse.parse_buf(s)
    #     u = ["foo",".bar",".bzz","hello",".world"]
    #     self.assertEqual([v[1] for v in dataflow.lhs(t[1][1])],u)
    #
    def test_p06(self):
        """Cell arrays"""
        # Continuation dots inside a cell-array literal.
        s = """
        {1 ...
        'foo' ...
        'bar' ...
        'bzz'};
        """
        t = parse.parse(s)
        self.assert_(t)
39 |
if __name__ == "__main__":
    unittest.main()  # discover and run this module's TestCase classes
42 |
--------------------------------------------------------------------------------
/smop/test_sparsearray.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import unittest
3 | from core import *
4 | from sparsearray import sparsearray
5 |
class Slice(unittest.TestCase):
    """RHS indexing of a 2x2 sparsearray built column-major."""
    def setUp(self):
        self.a = sparsearray(arange (1,4).reshape(2,2,order="F"))

    def test01(self):
        #self.assertTrue(isequal (self.a[1:4],[1,2,3,4]))
        # Column-major layout: a == [[1,3],[2,4]]
        self.assertEqual(self.a[1,1], 1)
        self.assertEqual(self.a[2,1], 2)
        self.assertEqual(self.a[1,2], 3)
        self.assertEqual(self.a[2,2], 4)
16 | #
17 | # def test02(self):
18 | # self.a[1,1] = 11
19 | # self.a[2,1] = 22
20 | # self.a[1,2] = 33
21 | # self.a[2,2] = 44
22 | # self.assertTrue(isequal (self.a[1:4],[11,22,33,44]))
23 | #
24 | # def test03(self):
25 | # self.a[1] = 11
26 | # self.a[2] = 22
27 | # self.a[3] = 33
28 | # self.a[4] = 44
29 | # self.assertTrue(isequal (self.a[1:4],[11,22,33,44]))
30 | #
31 | # def test04(self):
32 | # #with self.assertRaises(ValueError):
33 | # self.a[1:4]=[11,22,33,44]
34 | # self.assertTrue(isequal (self.a[1:4],[11,22,33,44]))
35 | #
36 | # def test05(self):
37 | # self.a[:,:]=[[11,33],[22,44]]
38 | # self.assertTrue(isequal (self.a[1:4],[11,22,33,44]))
39 | #
40 | # def test06(self):
41 | # self.a[:]=[11,22,33,44]
42 | # self.assertTrue(isequal (self.a[1:4],[11,22,33,44]))
43 | #
44 | # def test07(self):
45 | # self.a[::3]=[11,44]
46 | # self.assertTrue(isequal (self.a[1:4],[11,2,3,44]))
47 | #
48 | # def test08(self):
49 | # self.a[1:4:3]=[11,44]
50 | # self.assertTrue(isequal (self.a[1:4],[11,2,3,44]))
51 | #
52 | # def test_007(self):
53 | # """
54 | # One-dimensional LHS indexing without expansion,
55 | # using list index.
56 | # """
57 | #
58 | # a[[1,4]]=1
59 | # self.assertEqual(str(a), "[[ 1. 0.]\n [ 0. 1.]]")
60 | #
61 | # @unittest.skip("")
62 | # def test_008(self):
63 | # a=zeros (2,2)
64 | # a[[4,3,2,1]]=[1,2,3,4]
65 | # self.assertEqual(str(a), "[[ 1. 2.]\n [ 3. 4.]]")
66 | #
67 | #
68 | #
69 | # @unittest.skip("")
70 | # def test_010(self):
71 | # a=zeros (2,2)
72 | # a[2,:]=[1,2]
73 | # self.assertEqual(str(a), "[[ 0. 0.]\n [ 1. 2.]]")
74 | #
75 | # @unittest.skip("")
76 | # def test_011(self):
77 | # a=zeros (2,2)
78 | # a[2,1:2]=[1,2]
79 | # self.assertEqual(str(a), "[[ 0. 0.]\n [ 1. 2.]]")
80 | #
81 | # @unittest.skip("")
82 | # def test_012(self):
83 | # a=zeros (2,2)
84 | # a[2,[1,2]]=[1,2]
85 | # self.assertEqual(str(a), "[[ 0. 0.]\n [ 1. 2.]]")
86 | #
87 | # @unittest.skip("")
88 | # def test_013(self):
89 | # a=zeros (2,2)
90 | # a[:,:]=[[1,2],[3,4]]
91 | # self.assertEqual(str(a), "[[ 1. 2.]\n [ 3. 4.]]")
92 | #
93 | # @unittest.skip("")
94 | # def test_014(self):
95 | # a=zeros (2,2)
96 | # with self.assertRaises(ValueError):
97 | # a[:,:]=[1,2,3,4]
98 |
class Sparsearray(unittest.TestCase):
    """Construction of a sparsearray from a plain numpy array."""
    def setUp(self):
        self.a = np.arange(1,5).reshape(2,2,order="F")
        self.b = sparsearray(self.a)

    def test01(self):
        # Elements are addressable with 1-based (row, col) indices.
        self.assertEqual(self.b[1,1],1)
        self.assertEqual(self.b[2,1],2)
        self.assertEqual(self.b[1,2],3)
        self.assertEqual(self.b[2,2],4)
109 |
class Index(unittest.TestCase):
    """LHS indexing of sparsearray without expansion."""
    def setUp(self):
        self.a = sparsearray(arange (1,4).reshape(2,2,order="F"))

    def test01(self):
        """
        One-dimensional LHS indexing of multi-dimensional array,
        without expansion. Using scalar index.
        """
        # Linear index 4 is the column-major last cell (2,2).
        self.a[4]=9
        self.assertTrue(isequal (self.a, sparsearray([[1, 3],[2, 9]])))

    def test02(self):
        """
        Multi-dimensional LHS indexing, without expansion.
        """
        self.a[1,1]=11
        self.a[2,2]=22
        self.a[1,2]=12
        self.a[2,1]=21
        self.assertTrue(isequal (self.a, sparsearray([[11,12],[21,22]])))
131 |
class Indices(unittest.TestCase):
    """Out-of-range and multi-cell indexing of sparsearray."""
    def setUp(self):
        self.a = sparsearray(np.arange(1,5).reshape(2,2,order="F"))
        self.c = sparsearray(arange (1,20).reshape(4,5,order="F"))

    def test01(self):
        # Out-of-range linear write raises; out-of-range 2-tuple write expands.
        with self.assertRaises(ValueError):
            self.a[100]=100
        self.a[3,3]=33
        self.assertTrue(isequal (self.a.todense(), np.asarray([[1,3,0],[2,4,0],[0,0,33]])))

    def test02(self):
        # Full linearization, explicit and via open slice.
        self.assertTrue(isequal (self.a[:], [1,2,3,4]))
        self.assertTrue(isequal (self.a[1:4], [1,2,3,4]))

    @unittest.skip("")
    def test03(self):
        self.assertTrue(isequal (self.a[:,:], [[1,3],[2,4]]))
        self.assertTrue(isequal (self.a[1:2,1:2], [1,2,3,4]))

    def test04(self):
        # Expansion via linear index then via 2-tuple index.
        a = sparsearray([1,2,3])
        a[5]=100
        a[3,3]=33
        self.assertEqual(a.shape, (3,5))
        self.assertTrue(isequal (a.todense(), [[1,2,3,0,100],[0,0,0,0,0],[0,0,33,0,0]]))
158 |
159 |
# NOTE: this region previously contained byte-for-byte duplicates of the
# Slice, Sparsearray, Index and Indices TestCase classes defined above
# (an apparent copy-paste error).  Because a repeated `class` statement
# merely rebinds the same module attribute, unittest discovered only one
# class per name either way -- the duplicates were dead code and have
# been removed.  The surviving definitions above are identical, so the
# set of tests that run is unchanged.
230 |
if __name__ == "__main__":
    unittest.main()  # discover and run this module's TestCase classes
233 |
--------------------------------------------------------------------------------
/smop/typeof.py:
--------------------------------------------------------------------------------
1 | """
2 | Given node.function instance, apply it
3 | to argument list, such as ["i","d"]
4 | and return the result.
5 | """
6 |
7 | import node,options
8 | from node import extend,exceptions
9 |
10 | callstack = set()
11 |
@extend(node.function)
@exceptions
def apply(self,args,symtab):
    """Type a function body given the types of its actual arguments.

    `args` is a list of type strings for the actuals; missing trailing
    arguments are padded with ''.  Returns '' (return types are not yet
    propagated -- see the commented-out code below).
    """
    name = self.head.ident.name
    # Recursion guard: a function already being typed is not re-entered.
    if name in callstack:
        return
    print "%%%", self.head.ident.name
    callstack.add(name)

    # Bind each formal parameter (name, lineno) to its actual's type.
    params = [(u.name,u.lineno) for u in self.head.args]
    if len(args) < len(self.head.args): # head.args are formal params
        args += [''] * (len(self.head.args)-len(args))
    symtab.update(zip(params,args))
    self.body._typeof(symtab)
    #return [symtab[u.name,u.lineno] for u in self.ret]
    # for u in node.postorder(self):
    #     if u.__class__ is node.ident:
    #         try:
    #             print u.name,u.lineno,symtab[u.name,u.lineno]
    #         except:
    #             print u.name,u.lineno,'?'
    return '' #[u._typeof(symtab) for u in self.head.ret]
34 |
# Type codes used throughout: 'i' integer, 'd' double, 'l' logical,
# '' unknown.  Each @extend below attaches _typeof to one AST node class.

@extend(node.node)
@exceptions
def _typeof(self,symtab):
    # Generic fallback: type every child for its side effects on symtab.
    for u in self:
        u._typeof(symtab)

@extend(node.for_stmt)
@exceptions
def _typeof(self,symtab):
    # The loop variable is an integer; then type the loop body.
    symtab[self.ident.name,self.ident.lineno] = 'i'
    self.stmt_list._typeof(symtab)

@extend(node.func_decl)
@exceptions
def _typeof(self,symtab):
    pass # ignore ident,args,ret

@extend(node.let)
@exceptions
def _typeof(self,symtab):
    # Assignment: propagate the RHS type to a simple identifier LHS.
    if self.ret.__class__ is node.ident:
        symtab[self.ret.name,self.ret.lineno] = self.args._typeof(symtab)

@extend(node.param)
@extend(node.cellfun) # FIXME
@exceptions
def _typeof(self,symtab):
    return ''

@extend(node.ident)
@exceptions
def _typeof(self,symtab):
    """
    symtab maps pairs (name,lineno) to string
    encoding of type.
    """
    # A use site (defs is None) reads the type straight from symtab.
    if self.defs is None:
        try:
            return symtab[self.name,self.lineno]
        except:
            print '+++ Missing type for',self.name
            return ''
    # Otherwise merge the types of all reaching definitions.
    ts = set([u._typeof(symtab) for u in self.defs if u.defs is None])
    if '' in ts:
        ts.remove('')
    if 'i' in ts and 'd' in ts:
        ts.remove('i')  # int is subsumed by double
    if len(ts) == 0:
        #print '+++','No definition of variable',self.name,self.lineno
        return ''
    if len(ts) == 1:
        self._t = ts.pop()
        return self._t
    if len(ts) > 1:
        print '+++','Conflicting defs for', self.name,self.lineno,ts
        return ''

    return ''
93 |
@extend(node.matrix)
@exceptions
def _typeof(self,symtab):
    # Matrix literal: pick any element's known type, default to double.
    ts = set([u._typeof(symtab) for u in self.args if u._typeof(symtab)])
    if len(ts) >= 1:
        return ts.pop()
    return 'd'

    # ts = set([u._typeof(symtab) for u in self])
    # if len(ts) == 1:
    #     return ts.pop()
    # return ''

@extend(node.arrayref)
@extend(node.cellarrayref)
@exceptions
def _typeof(self,symtab):
    # Indexing preserves the type of the indexed expression.
    return self.func_expr._typeof(symtab)

@extend(node.expr)
@exceptions
def _typeof(self,symtab):
    """Generic operator dispatch on self.op and arity."""
    nargs = len(self.args)
    if nargs == 0:
        assert 0
    if nargs == 1:
        if self.op in ("!","~"):
            return 'l'
        if self.op in ("-","parens"):
            return self.args[0]._typeof(symtab)
        assert 0, self.op
    if len(self.args) == 2:
        if self.op == ":":
            return 'i'
        if self.op in ("<",">","<=",">=","==","~=","!=","&&","||"):
            return 'l'
        if self.op in ("*","+","-","/","^",".*","./"):
            # Arithmetic is integer only when both operands are integer.
            if (self.args[0]._typeof(symtab) == 'i' and
                self.args[1]._typeof(symtab) == 'i'):
                return 'i'
            else:
                return 'd'

@extend(node.add)
@extend(node.sub)
@exceptions
def _typeof(self,symtab):
    # Same rule as the generic arithmetic case above.
    if (self.args[0]._typeof(symtab) == 'i' and
        self.args[1]._typeof(symtab) == 'i'):
        return 'i'
    else:
        return 'd'

@extend(node.ones)
@extend(node.zeros)
@exceptions
def _typeof(self,symtab):
    # NOTE(review): 'c' in the last argument presumably marks a class-name
    # argument (e.g. zeros(n,'single')); only the default is supported.
    if self.args[-1]._typeof(symtab) == 'c':
        raise Exception("Only 'double' is implemented")
    return 'i'

@extend(node.RESHAPE)
@extend(node.MAXVAL)
@extend(node.MINVAL)
@extend(node.CEILING)
@extend(node.abs)
@extend(node.ceil)
@extend(node.sign)
@extend(node.min)
@extend(node.max)
@extend(node.transpose)
@exceptions
def _typeof(self,symtab):
    # Shape- and element-wise builtins preserve the argument's type.
    return self.args[0]._typeof(symtab)

# sum is different from similar funcs.
# re-check this
@extend(node.sum)
@extend(node.cumsum)
@extend(node.cumprod)
@exceptions
def _typeof(self,symtab):
    # Summing logicals yields integers; otherwise the type is preserved.
    if self.args[0]._typeof(symtab) == 'l':
        return 'i'
    return self.args[0]._typeof(symtab)
179 |
@extend(node.number)
@exceptions
def _typeof(self,symtab):
    # Literal: integer literals are 'i', everything else 'd'.
    return 'i' if isinstance(self.value,int) else 'd'

@extend(node.diff)
@extend(node.RAND)
@extend(node.rand)
@extend(node.inf)
@extend(node.dot)
@extend(node.nnz)
@extend(node.mod)
@exceptions
def _typeof(self,symtab):
    # These builtins are typed as double.
    return 'd'

@extend(node.rem)
@extend(node.SIZE)
@extend(node.size)
@extend(node.find)
#@extend(node.findone)
@exceptions
def _typeof(self,symtab):
    # Two integer results ('ii'), e.g. size -> rows, cols.
    return 'ii'

@extend(node.sort)
@exceptions
def _typeof(self,symtab):
    return self.args[0]._typeof(symtab)
    #if len(self.ret) == 2:
    #    return self.args[0]._typeof(symtab) + 'i'

@extend(node.numel)
@extend(node.floor)
@extend(node.length)
@extend(node.range_)
@exceptions
def _typeof(self,symtab):
    # Counting/index builtins are integers.
    return 'i'

@extend(node.false)
@extend(node.true)
@extend(node.isequal)
@extend(node.isnan)
@extend(node.isinf)
@extend(node.isempty)
@extend(node.all)
@extend(node.any)
@exceptions
def _typeof(self,symtab):
    # Predicates are logical.
    return 'l'

@extend(node.funcall) # func_expr,args
@exceptions
def _typeof(self,symtab):
    # User-defined call: look up the function object and type its body
    # with the actual-argument nodes.
    func_obj = symtab[self.func_expr.name]
    #if not self.ret:
    return func_obj.apply(self.args,symtab)
    #return ['' for i in self.ret]

@extend(node.ravel)
@exceptions
def _typeof(self,symtab):
    return self.args[0]._typeof(symtab)

@extend(node.continue_stmt)
@extend(node.break_stmt)
@exceptions
def _typeof(self,symtab):
    pass
250 |
--------------------------------------------------------------------------------
/smop/yacc.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/smop/yacc.pyc
--------------------------------------------------------------------------------
/solveHomotopy.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/solveHomotopy.pyc
--------------------------------------------------------------------------------
/sp.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import tensorflow as tf
4 | import numpy as np
5 | from supporting_files.helpers import optimize
6 |
def getSparcityPrior(inputX, C_init=None, lambda1=0.01, lambda2=10000, optimizer='Adam', epochs=10000, learning_rate=0.1, print_step=50):
    """Learn a self-expressive coefficient matrix C such that X ~= X*C.

    Minimizes  mean((X - X*C)^2) + lambda1*mean(|C|) + lambda2*trace(C^2)/n,
    i.e. reconstruction error, an L1 sparsity penalty on C, and a penalty
    driving the diagonal of C towards zero.

    Parameters
    ----------
    inputX : array of shape (n_feat, n_sample), one sample per column.
    C_init : optional initial value for C; random uniform in [-1, 1) if None.
    lambda1, lambda2 : weights of the sparsity and diagonal penalties.
    optimizer : name understood by supporting_files.helpers.optimize.
    epochs, learning_rate, print_step : training loop controls.

    Returns
    -------
    The learned (n_sample, n_sample) coefficient matrix as a numpy array.
    """
    tf.reset_default_graph()

    n_feat, n_sample = inputX.shape

    X = tf.placeholder(dtype=tf.float32, shape=[n_feat, n_sample], name='X')

    if C_init is None:
        C = tf.Variable(tf.random_uniform([n_sample, n_sample], -1, 1), name='C')
    else:
        C = tf.Variable(C_init, name='C')

    # Reconstruction error mean((X - X*C)^2).
    residual = X - tf.matmul(X, C)
    recon_loss = tf.reduce_mean(tf.square(residual))

    # L1 penalty creates sparseness in C.
    reg_lossC = tf.reduce_mean(abs(C))

    # Force the entries on the diagonal of C towards zero.
    reg_lossD = tf.trace(tf.square(C)) / n_sample

    cost = recon_loss + lambda1 * reg_lossC + lambda2 * reg_lossD
    # Distinct name so the 'optimizer' argument is no longer shadowed.
    train_op = optimize(cost, learning_rate, optimizer)

    saver = tf.train.Saver()
    # Optimize the objective.
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        print("Calculating C ...")
        # range (not xrange) keeps this compatible with Python 3.
        for i in range(1, epochs + 1):
            sess.run(train_op, feed_dict={X: inputX})
            cur_cost = sess.run(cost, feed_dict={X: inputX})
            if i % print_step == 0:
                print('epoch {0}: global loss = {1}'.format(i, cur_cost))
            if i % 50 == 0:
                # Periodically checkpoint so long runs can be resumed.
                save_path = saver.save(sess, "./model_C_" + str(i) + ".ckpt")
                print("Model saved in file: %s" % save_path)

        C_val = sess.run(C)

    return C_val
48 | # Add ops to save and restore all the variables.
49 |
50 |
51 | # Save the variables to disk.
52 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
/sp.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/sp.pyc
--------------------------------------------------------------------------------
/sp_blitzl1.py:
--------------------------------------------------------------------------------
1 | import cvxpy as cp
2 | import numpy as np
3 | import blitzl1
4 |
def sparseCoefRecovery(X, l=0.001):
    """Sparse subspace-clustering coefficient recovery via blitzl1.

    For each column x_i of X, solves an l1-regularized problem expressing
    x_i in terms of the remaining columns and scatters the n-1 recovered
    coefficients into column i of C, keeping C[i, i] = 0.

    Parameters
    ----------
    X : (d, n) data matrix, one sample per column.
    l : regularization strength as a fraction of the problem's lambda_max.

    Returns
    -------
    C : (n, n) coefficient matrix with a zero diagonal.
    """
    d, n = X.shape
    C = np.zeros((n, n))

    for i in range(n):  # range: works on both Python 2 and 3
        if i % 100 == 0:
            print("Processed for " + str(i) + " samples")

        # Dictionary is every column except the current one.
        A = np.delete(X, (i), axis=1)
        b = X[:, i]

        prob = blitzl1.LogRegProblem(A, b)
        lammax = prob.compute_lambda_max()
        sol = prob.solve(l * lammax)

        c_val = sol.x  # length n-1: one coefficient per other sample

        # Scatter the n-1 coefficients around the zero diagonal.
        # 0-based translation of MATLAB's C(1:i-1,i)=c(1:i-1) and
        # C(i+1:N,i)=c(i:N-1); the previous `if i > 1 ... c_val[:i-1]`
        # silently dropped row i-1 of every column.
        C[:i, i] = c_val[:i]
        C[i+1:, i] = c_val[i:]
        C[i, i] = 0

    return C
--------------------------------------------------------------------------------
/sp_blitzl1.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/sp_blitzl1.pyc
--------------------------------------------------------------------------------
/sp_cvx.py:
--------------------------------------------------------------------------------
1 | import cvxpy as cp
2 | import numpy as np
3 |
4 |
def sparseCoefRecovery(X, Opt, l=0.001):
    """Sparse subspace-clustering coefficient recovery via cvxpy.

    For each column x_i of X, solves an l1 problem expressing x_i in
    terms of the remaining columns and scatters the n-1 coefficients
    into column i of C, keeping C[i, i] = 0.

    Parameters
    ----------
    X : (d, n) data matrix, one sample per column.
    Opt : 'Lasso' (penalized reconstruction) or 'L1Perfect' (exact
        reconstruction constraint).
    l : weight of the reconstruction term in the 'Lasso' objective.

    Returns
    -------
    C : (n, n) coefficient matrix with a zero diagonal.

    Raises
    ------
    ValueError : if Opt is not one of the supported modes.
    """
    d, n = X.shape
    C = np.zeros((n, n))
    # Minimize(sum_squares(X - X*C)), solved column by column.

    for i in range(n):  # range: works on both Python 2 and 3
        print(i)

        y = X[:, i]
        Y = np.delete(X, (i), axis=1)

        c = cp.Variable(n-1, 1)
        if Opt == 'Lasso':
            # l1 sparsity plus penalized reconstruction error.
            objective = cp.Minimize(cp.norm(c, 1) + l * cp.norm(Y * c - y))
            constraints = []
        elif Opt == 'L1Perfect':
            # Minimum-l1 solution with exact reconstruction.
            objective = cp.Minimize(cp.norm(c, 1))
            constraints = [Y * c == y]
        else:
            # Previously an unknown Opt crashed below with a NameError.
            raise ValueError("Opt must be 'Lasso' or 'L1Perfect', got %r" % (Opt,))

        prob = cp.Problem(objective, constraints)
        prob.solve()

        c_val = np.array(c.value)[:, 0]  # length n-1

        # Scatter coefficients around the zero diagonal; fixes the
        # off-by-one (`c_val[:i-1]`) that dropped row i-1 of each column
        # (MATLAB C(1:i-1,i)=c(1:i-1) is C[:i,i]=c_val[:i] 0-based).
        C[:i, i] = c_val[:i]
        C[i+1:, i] = c_val[i:]
        C[i, i] = 0

    return C
--------------------------------------------------------------------------------
/sp_cvx.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/sp_cvx.pyc
--------------------------------------------------------------------------------
/supporting_files/__init__.py:
--------------------------------------------------------------------------------
# Supporting tools for the Deep Subspace Clustering project.
2 | #
3 | # Xupeng Tong 2016
--------------------------------------------------------------------------------
/supporting_files/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/supporting_files/__init__.pyc
--------------------------------------------------------------------------------
/supporting_files/helpers.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from sda import StackedDenoisingAutoencoder
3 |
4 | import tensorflow as tf
5 | import numpy as np
6 |
def activate(layer, name):
    """Apply the activation function *name* to *layer*.

    Supported names: 'sigmoid', 'softmax', 'tanh', 'relu', and 'linear'
    (identity). Any other name falls through and yields None, matching
    the historical behaviour.
    """
    if name == 'linear':
        return layer
    if name == 'sigmoid':
        return tf.nn.sigmoid(layer)
    if name == 'softmax':
        return tf.nn.softmax(layer)
    if name == 'tanh':
        return tf.nn.tanh(layer)
    if name == 'relu':
        return tf.nn.relu(layer)
18 |
def optimize(cost, learning_rate, optimizer):
    """Build a minimization op for *cost* using the named optimizer.

    *optimizer* must be 'FTRL', 'Adam', or 'SGD'; an unknown name raises
    KeyError, as before.
    """
    optimizer_classes = {
        'FTRL': tf.train.FtrlOptimizer,
        'Adam': tf.train.AdamOptimizer,
        'SGD': tf.train.GradientDescentOptimizer,
    }
    opt_class = optimizer_classes[optimizer]
    return opt_class(learning_rate=learning_rate).minimize(cost)
24 |
def one_hot(y):
    """One-hot encode the label vector *y*.

    Columns follow the sorted order of np.unique(y), so labels need not
    be the contiguous integers 0..k-1 (the previous version indexed
    columns by the raw label value and crashed for e.g. y = [0, 5]).
    For labels that already are 0..k-1 the result is unchanged.

    Returns an array of shape (len(y), n_classes).
    """
    classes, inverse = np.unique(y, return_inverse=True)
    one_hot_Y = np.zeros((len(y), len(classes)))
    # Vectorized scatter: row i gets a 1 in the column of its class.
    one_hot_Y[np.arange(len(y)), inverse] = 1

    return one_hot_Y
32 |
def init_layer_weight(dims, X, name):
    """Build initial weight/bias lists for a stack of dense layers.

    name == 'sda'     : greedy layer-wise pretraining with a stacked
                        denoising autoencoder fitted on X.
    name == 'uniform' : sigmoid-scaled Glorot uniform initialization.

    Returns (weights, biases) with one entry per layer in *dims*.
    """
    if name == 'sda':
        sda = StackedDenoisingAutoencoder(dims=dims)
        sda._fit(X)
        return sda.weights, sda.biases

    weights = []
    biases = []
    if name == 'uniform':
        fan_in = X.shape[1]
        for fan_out in dims:
            # 4 * sqrt(6 / (fan_in + fan_out)): uniform bound scaled for sigmoid.
            bound = 4 * np.sqrt(6.0 / (fan_in + fan_out))
            weights.append(tf.random_uniform([fan_in, fan_out], minval=-bound, maxval=bound))
            biases.append(tf.zeros([fan_out, ]))
            fan_in = fan_out

    return weights, biases
48 |
def get_batch(X, Y, size):
    """Sample *size* aligned rows from X and Y, without replacement."""
    assert len(X) == len(Y)
    idx = np.random.choice(len(X), size, replace=False)
    return X[idx], Y[idx]
53 |
def get_batch_XC(X, C, size):
    """Sample *size* rows of X plus the matching square block of C.

    C is a pairwise (n, n) coefficient matrix, so the batch needs the
    submatrix C[idx][:, idx] of shape (size, size). The previous
    `C[a, a]` fancy indexing returned only the diagonal entries
    C[a_i, a_i] — a length-`size` vector, identically zero for a
    self-expressive C whose diagonal is penalized to zero — rather than
    the block (cf. GenBatch.get_batch, which returns
    C[start:end, start:end]).
    """
    assert len(X) == len(C)
    idx = np.random.choice(len(X), size, replace=False)
    return X[idx], C[np.ix_(idx, idx)]
58 |
class GenBatch():
    """Sequential mini-batch generator over X (and optionally y and C).

    get_batch() returns [X_batch], plus Y_batch if y was given, plus the
    aligned square block of C if C was given. When len(X) is not a
    multiple of batch_size, the call with index == n_batch yields the
    leftover tail batch. Call resetIndex() to start a new epoch.
    """
    def __init__(self, X, y=None, C=None, batch_size=500):
        self.X = X
        self.Y = y
        self.C = C
        self.batch_size = batch_size
        # Number of full batches; floor division keeps this an int on Python 3.
        self.n_batch = len(X) // batch_size
        self.index = 0

    def get_batch(self):
        """Return the next batch and advance the internal index."""
        # Fix: start is index*batch_size — it used to be just `index`,
        # so every batch after the first had the wrong offset and size.
        start = self.index * self.batch_size
        end = start + self.batch_size
        if self.index == self.n_batch:
            # Tail batch: whatever remains after the last full batch.
            end = len(self.X)
        self.index += 1

        batch_range = range(start, end)
        return_list = [self.X[batch_range]]
        if self.Y is not None:
            return_list.append(self.Y[batch_range])
        if self.C is not None:
            # Square block of the pairwise matrix for this batch.
            return_list.append(self.C[start:end, start:end])

        return return_list

    def resetIndex(self):
        # Restart iteration from the first batch (new epoch).
        self.index = 0
--------------------------------------------------------------------------------
/supporting_files/helpers.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/supporting_files/helpers.pyc
--------------------------------------------------------------------------------
/supporting_files/nncomponents.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from helpers import *
4 |
class One2OneInputLayer(object):
    """Input layer with a one-to-one (elementwise) feature weighting.

    Rows of *input* are samples and columns are features; because the
    mapping is one-to-one, the weight vector has exactly one scalar per
    input column.
    """
    def __init__(self, input, weight_init=None):
        n_in = input.get_shape()[1].value

        self.input = input

        # Uniform initialization bound for the elementwise weights.
        bound = 4 * np.sqrt(3.0 / n_in)

        self.w = (tf.Variable(tf.random_uniform([n_in, ], -bound, bound), name='w')
                  if weight_init is None
                  else tf.Variable(weight_init, name='w'))

        # Elementwise scaling of the input features.
        self.output = self.w * self.input
28 |
class DenseLayer(object):
    """Canonical fully-connected layer: output = activation(input @ w + b).

    Rows of *input* are samples, columns are features. *init_w* and
    *init_b* supply the initial weight matrix and bias vector, and
    *activation* names one of the functions understood by activate().
    """
    def __init__(self, input, init_w, init_b, activation='sigmoid'):
        n_in = input.get_shape()[1].value  # kept for parity with the other layers
        self.input = input

        w = tf.Variable(init_w, name='w')
        b = tf.Variable(init_b, name='b')

        pre_activation = tf.add(tf.matmul(input, w), b)

        self.w = w
        self.b = b
        self.output = activate(pre_activation, activation)
        # NOTE(review): only w is exposed in params; b is excluded —
        # presumably intentional, confirm against the training code.
        self.params = [w]
58 |
class SoftmaxLayer(object):
    def __init__(self, input, n_out, y):
        """Softmax classification layer with a cross-entropy cost.

        input : (batch, n_in) feature tensor (rows are samples).
        n_out : number of output classes.
        y     : one-hot label tensor of shape (batch, n_out).

        Exposes self.cost (mean cross-entropy), self.accuracy, the layer
        parameters, and self.temp (the softmax probabilities).
        """
        n_in = input.get_shape()[1].value
        self.input = input

        # Uniform init bound, 4*sqrt(6/(fan_in+fan_out)) — Glorot scaled
        # for sigmoid-style units.
        r = 4*np.sqrt(6.0/(n_in + n_out))
        w = tf.Variable(tf.random_uniform([n_in, n_out], minval=-r, maxval=r))
        b = tf.Variable(tf.zeros([n_out]), name='b')

        # Unnormalized class scores (logits).
        pred = tf.add(tf.matmul(input, w), b)
        ################
        # Softmax probabilities, kept for inspection/debugging.
        temp = tf.nn.softmax(pred)

        # NOTE(review): positional (logits, labels) matches the
        # pre-TF-1.0 signature of softmax_cross_entropy_with_logits —
        # confirm the TF version before upgrading this call.
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))

        # Fraction of samples whose argmax prediction matches the label.
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        self.y = y
        self.w = w
        self.b = b
        self.cost = cost
        ###############
        self.temp = temp
        self.params= [w]
--------------------------------------------------------------------------------
/supporting_files/nncomponents.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/supporting_files/nncomponents.pyc
--------------------------------------------------------------------------------
/supporting_files/sda.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
class StackedDenoisingAutoencoder:
    """A stacked deep autoencoder with denoising capability.

    Each layer is pretrained greedily as a tied-weight denoising
    autoencoder; the learned encoder weights and biases accumulate in
    self.weights / self.biases.
    """

    def __init__(self, dims=[100,100,100], epochs=[100,100,100], activations=['sigmoid']*3, noise=None, loss='rmse', lr=0.001, batch_size=100, print_step=50):
        """
        dims / epochs / activations : per-layer hidden size, training
            epochs, and activation name (must have matching lengths).
        noise : None, 'gaussian', or 'mask-<frac>' (e.g. 'mask-0.3').
        loss  : 'rmse' or 'cross-entropy'.
        """
        self.print_step = print_step
        self.batch_size = batch_size
        self.lr = lr
        self.loss = loss
        self.activations = activations
        self.noise = noise
        self.epochs = epochs
        self.dims = dims
        self.depth = len(dims)
        self.weights, self.biases = [], []
        # (Removed a dead `epochs = [100 for i in xrange(len(dims))]`
        # local: it had no effect and its xrange broke instantiation on
        # Python 3.)
        # assert len(dims) == len(epochs)

    def _fit(self, x):
        """Greedily pretrain each layer; x is (n_samples, n_features)."""
        for i in range(self.depth):
            print('Layer {0}'.format(i + 1))
            # Train on corrupted input, reconstruct the clean data, and
            # feed the resulting encoding to the next layer.
            x = self._run(data_x=self._add_noise(x), activation=self.activations[i], data_x_=x,
                          hidden_dim=self.dims[i], epochs=self.epochs[i], loss=self.loss,
                          batch_size=self.batch_size, lr=self.lr, print_step=self.print_step)

    def _add_noise(self, x):
        """Corrupt x according to self.noise; returns x unchanged if None."""
        if self.noise == 'gaussian':
            n = np.random.normal(0, 0.1, (len(x), len(x[0])))
            return x + n
        # Fix: masking noise is spelled 'mask-<frac>', so match on the
        # prefix. The old `== 'mask'` comparison could never be true for
        # such values (and crashed with IndexError on the bare string
        # 'mask' when splitting out the fraction).
        if self.noise is not None and self.noise.startswith('mask-'):
            frac = float(self.noise.split('-')[1])
            temp = np.copy(x)
            for row in temp:
                # Zero a random fraction of each sample's features.
                idx = np.random.choice(len(row), int(round(frac * len(row))), replace=False)
                row[idx] = 0
            return temp
        if self.noise is None:
            return x

    def _transform(self, data):
        """Propagate data through the trained stack; returns a numpy array."""
        sess = tf.Session()
        x = tf.constant(data, dtype=tf.float32)
        for w, b, a in zip(self.weights, self.biases, self.activations):
            weight = tf.constant(w, dtype=tf.float32)
            bias = tf.constant(b, dtype=tf.float32)
            layer = tf.matmul(x, weight) + bias
            x = self.activate(layer, a)
        return x.eval(session=sess)

    def get_transformed_data(self, x):
        """Train on x, then return its deep representation."""
        self._fit(x)
        return self._transform(x)

    def _run(self, data_x, data_x_, hidden_dim, activation, loss, lr, print_step, epochs, batch_size=100):
        """Train one tied-weight denoising autoencoder layer.

        data_x is the corrupted input, data_x_ the clean target.
        Appends the learned encoder parameters to self.weights/biases and
        returns the encoding of the clean data.
        """
        input_dim = len(data_x[0])
        print(input_dim)
        print(hidden_dim)
        sess = tf.Session()
        x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x')
        x_ = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x_')
        encode = {'weights': tf.Variable(tf.truncated_normal([input_dim, hidden_dim], dtype=tf.float32)),
                  'biases': tf.Variable(tf.truncated_normal([hidden_dim], dtype=tf.float32))}
        # Tied weights: the decoder reuses the transposed encoder matrix.
        decode = {'biases': tf.Variable(tf.truncated_normal([input_dim], dtype=tf.float32)),
                  'weights': tf.transpose(encode['weights'])}

        encoded = self.activate(tf.matmul(x, encode['weights']) + encode['biases'], activation)
        decoded = tf.matmul(encoded, decode['weights']) + decode['biases']

        # Reconstruction loss (the string argument is shadowed by the
        # loss tensor below, as before).
        if loss == 'rmse':
            # (x_ - decoded) instead of the long-deprecated tf.sub alias;
            # the overloaded operator is equivalent.
            loss = tf.sqrt(tf.reduce_mean(tf.square(x_ - decoded)))
        elif loss == 'cross-entropy':
            # NOTE(review): positional (logits, labels) is the pre-TF-1.0
            # signature — confirm the TF version before upgrading.
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(decoded, x_))
        train_op = tf.train.AdamOptimizer(lr).minimize(loss)

        sess.run(tf.initialize_all_variables())
        for i in range(epochs):
            b_x, b_x_ = self._get_batch(data_x, data_x_, batch_size)
            sess.run(train_op, feed_dict={x: b_x, x_: b_x_})
            if (i + 1) % print_step == 0:
                l = sess.run(loss, feed_dict={x: data_x, x_: data_x_})
                print('epoch {0}: global loss = {1}'.format(i, l))
        self.weights.append(sess.run(encode['weights']))
        self.biases.append(sess.run(encode['biases']))
        return sess.run(encoded, feed_dict={x: data_x_})

    def _get_batch(self, X, X_, size):
        """Random aligned mini-batch, sampled without replacement."""
        a = np.random.choice(len(X), size, replace=False)
        return X[a], X_[a]

    def activate(self, linear, name):
        """Apply the named activation; unknown names return None."""
        if name == 'sigmoid':
            return tf.nn.sigmoid(linear, name='encoded')
        elif name == 'softmax':
            return tf.nn.softmax(linear, name='encoded')
        elif name == 'tanh':
            return tf.nn.tanh(linear, name='encoded')
        elif name == 'relu':
            return tf.nn.relu(linear, name='encoded')
--------------------------------------------------------------------------------
/supporting_files/sda.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tonyabracadabra/Deep-Subspace-Clustering/7fc12084dfc3f4f5368f7c38841ad0bf7295f626/supporting_files/sda.pyc
--------------------------------------------------------------------------------