├── .idea
│   ├── .name
│   ├── Python-Work.iml
│   ├── encodings.xml
│   ├── misc.xml
│   └── modules.xml
├── Arxiv
│   └── search_arxiv.py
├── College
│   ├── .ipynb_checkpoints
│   │   └── MLAssignment-checkpoint.ipynb
│   ├── MLAssignment.ipynb
│   ├── MLAssignment1.py
│   └── primality.py
├── Cuda
│   └── cuda_add.py
├── GifToPNG
│   └── convert.py
├── GooglePlayMusic
│   ├── GPM1
│   │   ├── AnalyzeMusic.py
│   │   ├── BestGPM.py
│   │   ├── DatabaseManager.py
│   │   ├── GPM.db
│   │   └── music.csv
│   ├── gpm_serialize.py
│   └── process_data.py
├── KerasTools
│   └── count_metrics.py
├── NeuralNetRegression.py
├── NeuralNetworks
│   ├── ELU.png
│   ├── SpiralGenerator.py
│   └── SpiralNNClassifier.py
├── NotificationTest.py
├── Parallel Quick Sort Results
│   ├── AnalyzeResults.py
│   ├── DotConvert.py
│   ├── Ishaan-Data
│   │   ├── Result 1000 Type 1.json
│   │   ├── Result 1000 Type 2.json
│   │   ├── Result 1000000 Type 1.json
│   │   ├── Result 1000000 Type 2.json
│   │   ├── Result 10000000 Type 1.json
│   │   ├── Result 10000000 Type 2.json
│   │   ├── Result 100000000 Type 1.json
│   │   ├── Result 100000000 Type 2.json
│   │   ├── Result 20000000 Type 1.json
│   │   ├── Result 20000000 Type 2.json
│   │   ├── Result 5000000 Type 1.json
│   │   ├── Result 5000000 Type 2.json
│   │   ├── Result 50000000 Type 1.json
│   │   └── Result 50000000 Type 2.json
│   ├── Java-Data
│   │   ├── Java Result 1000000 Type 1.json
│   │   ├── Java Result 1000000 Type 2.json
│   │   ├── Java Result 10000000 Type 1.json
│   │   ├── Java Result 10000000 Type 2.json
│   │   ├── Java Result 100000000 Type 1.json
│   │   ├── Java Result 100000000 Type 2.json
│   │   ├── Java Result 20000000 Type 1.json
│   │   └── Java Result 20000000 Type 2.json
│   ├── Java-Result
│   │   ├── Java Result 1000000 Type 2 Image.png
│   │   ├── Java Result 10000000 Type 2 Image.png
│   │   ├── Java Result 100000000 Type 2 Image.png
│   │   └── Java Result 20000000 Type 2 Image.png
│   ├── Result-Ishaan
│   │   ├── Result 1000 Type 2 Image.png
│   │   ├── Result 1000000 Type 2 Image.png
│   │   ├── Result 10000000 Type 2 Image.png
│   │   ├── Result 100000000 Type 2 Image.png
│   │   ├── Result 20000000 Type 2 Image.png
│   │   ├── Result 5000000 Type 2 Image.png
│   │   └── Result 50000000 Type 2 Image.png
│   ├── Results-Som
│   │   ├── Result 1000 Type 2 Image.png
│   │   ├── Result 1000000 Type 2 Image.png
│   │   ├── Result 10000000 Type 2 Image.png
│   │   ├── Result 100000000 Type 2 Image.png
│   │   ├── Result 20000000 Type 2 Image.png
│   │   ├── Result 350000000 Type 1 Image.png
│   │   ├── Result 5000000 Type 2 Image.png
│   │   └── Result 50000000 Type 2 Image.png
│   ├── Results.dot
│   ├── Results.png
│   └── Som-Data
│       ├── Result 1000 Type 1.json
│       ├── Result 1000 Type 2.json
│       ├── Result 1000000 Type 1.json
│       ├── Result 1000000 Type 2.json
│       ├── Result 10000000 Type 1.json
│       ├── Result 10000000 Type 2.json
│       ├── Result 100000000 Type 1.json
│       ├── Result 100000000 Type 2.json
│       ├── Result 20000000 Type 1.json
│       ├── Result 20000000 Type 2.json
│       ├── Result 200000000 Type 1.json
│       ├── Result 350000000 Type 1.json
│       ├── Result 5000000 Type 1.json
│       ├── Result 5000000 Type 2.json
│       ├── Result 50000000 Type 1.json
│       └── Result 50000000 Type 2.json
├── PartialOverlappedInference
│   └── edit_distance.py
├── PortraitStyleTransfer.py
├── RamanujanMachines
│   ├── euler.py
│   └── pi.py
├── SuperFormula
│   ├── superformula.py
│   └── superformula_theano.py
├── TensorflowLearn
│   ├── approximate_solution.py
│   ├── eager_constrained_optimization.py
│   ├── eager_lstm.py
│   ├── eager_simple_optimization.py
│   ├── function_optimization.py
│   ├── logistic_reg.py
│   ├── mendelbrot_eager.py
│   ├── numpy_sgd.py
│   ├── optimization_profit.py
│   ├── plot_distributions.py
│   └── simple_optimization.py
├── Theano-learn
│   ├── derivatives.py
│   ├── examples.py
│   ├── initial.py
│   ├── linear_regression.py
│   └── logistic_regression.py
├── convert_flac_to_mp3
│   └── convert.py
├── convert_wav_to_mp3
│   └── convert_to_mp3.py
├── graph
│   ├── cell.py
│   ├── core.py
│   ├── run_cell.py
│   └── run_core.py
├── medicine
│   ├── medicine.py
│   └── merge_pdfs.py
├── metaprog
│   ├── composition.py
│   ├── delegates.py
│   └── registration.py
├── numpygrad
│   ├── __init__.py
│   ├── activations.py
│   ├── layers.py
│   ├── losses.py
│   ├── optim.py
│   ├── rnn.py
│   ├── run.py
│   └── tensor.py
├── temp.py
├── tfdiffeq_examples
│   ├── data
│   │   ├── Adiac_TEST
│   │   ├── Adiac_TRAIN
│   │   ├── Data.txt
│   │   └── Query.txt
│   ├── jump_reduce_tf.py
│   ├── jump_reduce_torch.py
│   ├── lorentz_attractor.py
│   ├── regression.py
│   ├── spiral_odes.py
│   ├── temp
│   │   ├── plot_tf.py
│   │   ├── temp1.py
│   │   ├── temp2.py
│   │   ├── temp3.py
│   │   ├── temp4.py
│   │   └── temp5.py
│   └── utils
│       ├── data_loader.py
│       ├── extract_ucr_datasets.py
│       └── progbar.py
└── wumpus_agent
    ├── agent.py
    ├── exec.py
    ├── memory.py
    └── model.py
/.idea/.name:
--------------------------------------------------------------------------------
1 | Python-Work
--------------------------------------------------------------------------------
/.idea/Python-Work.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Arxiv/search_arxiv.py:
--------------------------------------------------------------------------------
1 | import arxiv
2 |
3 | search = arxiv.Search(
4 | query="Librispeech",
5 | max_results=10,
6 | sort_by=arxiv.SortCriterion.SubmittedDate,
7 | sort_order=arxiv.SortOrder.Descending,
8 | )
9 |
10 | for result in search.results():
11 | print(result)
12 |
--------------------------------------------------------------------------------
/College/.ipynb_checkpoints/MLAssignment-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 3,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "%matplotlib inline"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 4,
17 | "metadata": {
18 | "collapsed": true
19 | },
20 | "outputs": [],
21 | "source": [
22 | "import numpy as np\n",
23 | "import sklearn.svm as svm\n",
24 | "import seaborn as sns\n",
25 | "sns.set_style(\"whitegrid\")\n",
26 | "\n",
27 | "from sklearn import cross_validation as cv\n",
28 | "from sklearn.datasets import load_iris\n",
29 | "from sklearn.learning_curve import validation_curve, learning_curve"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 6,
35 | "metadata": {
36 | "collapsed": true
37 | },
38 | "outputs": [],
39 | "source": [
40 | "def plotValidationCurve(estimator, title, X, y, param_name, param_range, cv=5):\n",
41 | " trainScores, testScores = validation_curve(estimator, X, y, param_name=param_name, param_range=param_range, cv=cv, scoring=\"accuracy\", )\n",
42 | "\n",
43 | " trainScoresMean = np.mean(trainScores, axis=1)\n",
44 | " trainScoresStd = np.std(trainScores, axis=1)\n",
45 | " testScoresMean = np.mean(testScores, axis=1)\n",
46 | " testScoresStd = np.std(testScores, axis=1)\n",
47 | "\n",
48 | " sns.plt.title(title)\n",
49 | " sns.plt.xlabel(param_name)\n",
50 | " sns.plt.ylabel(\"Accuracy Score\")\n",
51 | " sns.plt.ylim(0.0, 1.1)\n",
52 | " sns.plt.semilogx(param_range, trainScoresMean, label=\"Training score\", color=\"r\")\n",
53 | " sns.plt.fill_between(param_range, trainScoresMean - trainScoresStd, trainScoresMean + trainScoresStd, alpha=0.2, color=\"r\")\n",
54 | " sns.plt.semilogx(param_range, testScoresMean, label=\"Cross-validation score\",color=\"b\")\n",
55 | " sns.plt.fill_between(param_range, testScoresMean - testScoresStd, testScoresMean + testScoresStd, alpha=0.2, color=\"b\")\n",
56 | "\n",
57 | " sns.plt.legend(loc=\"best\")\n",
58 | " return sns.plt"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": 7,
64 | "metadata": {
65 | "collapsed": true
66 | },
67 | "outputs": [],
68 | "source": [
69 | "def plotLearningCurve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n",
70 | " sns.plt.figure()\n",
71 | " sns.plt.title(title)\n",
72 | " if ylim is not None:\n",
73 | " sns.plt.ylim(*ylim)\n",
74 | " sns.plt.xlabel(\"Training examples\")\n",
75 | " sns.plt.ylabel(\"Score\")\n",
76 | " train_sizes, train_scores, test_scores = learning_curve(\n",
77 | " estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n",
78 | " train_scores_mean = np.mean(train_scores, axis=1)\n",
79 | " train_scores_std = np.std(train_scores, axis=1)\n",
80 | " test_scores_mean = np.mean(test_scores, axis=1)\n",
81 | " test_scores_std = np.std(test_scores, axis=1)\n",
82 | "\n",
83 | " sns.plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,\n",
84 | " alpha=0.1, color=\"r\")\n",
85 | " sns.plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n",
86 | " test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n",
87 | " sns.plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n",
88 | " label=\"Training score\")\n",
89 | " sns.plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n",
90 | " label=\"Cross-validation score\")\n",
91 | "\n",
92 | " sns.plt.legend(loc=\"best\")\n",
93 | " return sns.plt\n"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {
100 | "collapsed": true
101 | },
102 | "outputs": [],
103 | "source": []
104 | }
105 | ],
106 | "metadata": {
107 | "kernelspec": {
108 | "display_name": "Python 3",
109 | "language": "python",
110 | "name": "python3"
111 | },
112 | "language_info": {
113 | "codemirror_mode": {
114 | "name": "ipython",
115 | "version": 3
116 | },
117 | "file_extension": ".py",
118 | "mimetype": "text/x-python",
119 | "name": "python",
120 | "nbconvert_exporter": "python",
121 | "pygments_lexer": "ipython3",
122 | "version": "3.4.4"
123 | }
124 | },
125 | "nbformat": 4,
126 | "nbformat_minor": 0
127 | }
128 |
--------------------------------------------------------------------------------
/College/MLAssignment1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import sklearn.svm as svm
3 | import seaborn as sns
4 | sns.set_style("whitegrid")
5 |
6 | from sklearn import cross_validation as cv
7 | from sklearn.datasets import load_iris
8 | from sklearn.learning_curve import validation_curve, learning_curve
9 |
10 | def plotValidationCurve(estimator, title, X, y, param_name, param_range, cv=5):
11 | trainScores, testScores = validation_curve(estimator, X, y, param_name=param_name, param_range=param_range, cv=cv, scoring="accuracy", )
12 |
13 | trainScoresMean = np.mean(trainScores, axis=1)
14 | trainScoresStd = np.std(trainScores, axis=1)
15 | testScoresMean = np.mean(testScores, axis=1)
16 | testScoresStd = np.std(testScores, axis=1)
17 |
18 | sns.plt.title(title)
19 | sns.plt.xlabel(param_name)
20 | sns.plt.ylabel("Accuracy Score")
21 | sns.plt.ylim(0.0, 1.1)
22 | sns.plt.semilogx(param_range, trainScoresMean, label="Training score", color="r")
23 | sns.plt.fill_between(param_range, trainScoresMean - trainScoresStd, trainScoresMean + trainScoresStd, alpha=0.2, color="r")
24 | sns.plt.semilogx(param_range, testScoresMean, label="Cross-validation score",color="b")
25 | sns.plt.fill_between(param_range, testScoresMean - testScoresStd, testScoresMean + testScoresStd, alpha=0.2, color="b")
26 |
27 | sns.plt.legend(loc="best")
28 | return sns.plt
29 |
30 | def plotLearningCurve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
31 | sns.plt.figure()
32 | sns.plt.title(title)
33 | if ylim is not None:
34 | sns.plt.ylim(*ylim)
35 | sns.plt.xlabel("Training examples")
36 | sns.plt.ylabel("Score")
37 | train_sizes, train_scores, test_scores = learning_curve(
38 | estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
39 | train_scores_mean = np.mean(train_scores, axis=1)
40 | train_scores_std = np.std(train_scores, axis=1)
41 | test_scores_mean = np.mean(test_scores, axis=1)
42 | test_scores_std = np.std(test_scores, axis=1)
43 |
44 | sns.plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
45 | alpha=0.1, color="r")
46 | sns.plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
47 | test_scores_mean + test_scores_std, alpha=0.1, color="g")
48 | sns.plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
49 | label="Training score")
50 | sns.plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
51 | label="Cross-validation score")
52 |
53 | sns.plt.legend(loc="best")
54 | return sns.plt
55 |
56 |
57 | if __name__ == "__main__":
58 | iris = load_iris()
59 |
60 | X = np.array(iris.data)
61 | y = np.array(iris.target)
62 |
63 | print("Number of data points : ", len(y))
64 | print("No of features : ", X.shape[1])
65 | print("No of classes : ", len(set(y)))
66 | print("\nFeature Names : ", iris.feature_names)
67 | print("Class Names : ", iris.target_names, "\n")
68 |
69 | # Validation Curve C
70 | Cs = np.logspace(-2, 6, 10)
71 | title = "Validation Curve - Regularization Factor C"
72 |
73 | plot = plotValidationCurve(svm.SVC(random_state=0), title, X, y, param_name="C", param_range=Cs, cv=5)
74 | plot.show()
75 |
76 | # Validation Curve Gamma
77 | gammas = np.logspace(-6, 3, 10)
78 | title = "Validation Curve - Regularization Factor Gamma"
79 |
80 | plot = plotValidationCurve(svm.SVC(random_state=0), title, X, y, param_name="gamma", param_range=gammas, cv=5)
81 | plot.show()
82 |
83 | # Learning Curve
84 | crossValidation = cv.ShuffleSplit(X.shape[0], n_iter=10, test_size=0.20, random_state=0)
85 | title = "Learning Curve - Support Vector Machine"
86 |
87 | plot = plotLearningCurve(svm.SVC(random_state=0), title, X, y, ylim=(0.0, 1.1), cv=crossValidation)
88 | plot.show()
89 |
--------------------------------------------------------------------------------
/College/primality.py:
--------------------------------------------------------------------------------
1 | import time
2 | import numpy as np
3 | import pandas as pd
4 | import math
5 | from random import randrange
6 |
7 |
8 | # Obtained from https://gist.github.com/Ayrx/5884790#file-miller_rabin-py-L5
9 | def miller_rabin(n, k=40):
10 |
11 | # Implementation uses the Miller-Rabin Primality Test
12 | # The optimal number of rounds for this test is 40
13 | # See http://stackoverflow.com/questions/6325576/how-many-iterations-of-rabin-miller-should-i-use-for-cryptographic-safe-primes
14 | # for justification
15 |
16 |     # Negative numbers, zero and one are not prime; even numbers above 2 are composite
17 | if n <= 0:
18 | return False
19 |
20 | if n == 1:
21 |         return False  # 1 is neither prime nor composite
22 |
23 | if n == 2:
24 | return True
25 |
26 | if n % 2 == 0:
27 | return False
28 |
29 | # `3` causes randrange to be in the range 2-(3-1) which crashes.
30 | if n == 3:
31 | return True
32 |
33 | r, s = 0, n - 1
34 | while s % 2 == 0:
35 | r += 1
36 | s //= 2
37 |
38 | for _ in range(k):
39 | a = randrange(2, n - 1)
40 | x = pow(a, s, n)
41 | if x == 1 or x == n - 1:
42 | continue
43 | for _ in range(r - 1):
44 | x = pow(x, 2, n)
45 | if x == n - 1:
46 | break
47 | else:
48 | return False
49 |
50 | return True
51 |
52 | if __name__ == '__main__':
53 | t1 = time.time()
54 |
55 | x = []
56 | y = []
57 |
58 | for i in range(int(1e6) + 1):
59 | p = miller_rabin(i)
60 | x.append(i)
61 | y.append(float(p))
62 |
63 | if i % 1000 == 0:
64 | print("Finished %d samples" % (i))
65 |
66 | print()
67 |
68 | x = np.array(x)
69 | y = np.array(y)
70 |
71 | df = pd.DataFrame({'x': x, 'y':y})
72 | print(df.info())
73 | print(df.describe())
74 |
75 | df.to_csv('data/primes.csv', header=True, index=False, encoding='utf-8')
76 |
77 | print(time.time() - t1)
78 |
--------------------------------------------------------------------------------
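
A quick spot-check for the miller_rabin function above. This is a minimal sketch, not a file in this repo, and it assumes primality.py is importable from the working directory:

    # Hypothetical sanity check for miller_rabin (illustrative, not in the repo).
    from primality import miller_rabin

    known_primes = [2, 3, 5, 7, 97, 7919]
    known_composites = [1, 4, 9, 561, 1000000]  # 561 is a Carmichael number

    assert all(miller_rabin(p) for p in known_primes)
    assert not any(miller_rabin(c) for c in known_composites)
    print("miller_rabin agrees on all test values")
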
/Cuda/cuda_add.py:
--------------------------------------------------------------------------------
1 | import time
2 | import numpy as np
3 | from numba import cuda
4 |
5 |
6 | @cuda.jit("void(float32[:, :], float32[:, :])")
7 | def add_inplace(x, y):
8 | i, j = cuda.grid(2) # get our global location on the cuda grid
9 |
10 | if i < x.shape[0] and j < x.shape[1]: # check whether we exceed our bounds
11 |         x[i][j] = x[i][j] + y[i][j] # in-place add into x
12 |
13 | @cuda.jit("void(float32[:, :], float32[:, :], float32[:, :])")
14 | def add_external(x, y, z):
15 | i, j = cuda.grid(2) # get our global location on the cuda grid
16 |
17 | if i < x.shape[0] and j < x.shape[1]: # check whether we exceed our bounds
18 |         z[i][j] = x[i][j] + y[i][j] # add into a zero buffer
19 |
20 |
21 | if __name__ == '__main__':
22 | NUM_TESTS = 25
23 |
24 | print("Num gpus : ", cuda.gpus)
25 |
26 | x = np.arange(int(1e8), dtype=np.float32).reshape((10000, 10000))
27 | y = np.arange(-int(1e8), int(1e8), step=2, dtype=np.float32).reshape((10000, 10000))
28 |
29 | print("x", x.shape, "y", y.shape)
30 | print()
31 |
32 | z_buffer = np.zeros_like(x, dtype=np.float32)
33 | x_copy = np.copy(x)
34 |
35 | threads_per_block = (32, 32)
36 | blocks_per_grid = ((int(x.shape[0] // threads_per_block[0])) + 1,
37 | (int(x.shape[1] // threads_per_block[1])) + 1)
38 |
39 | print("Number of blocks : ", blocks_per_grid)
40 | print("Number of threads per block: ", threads_per_block)
41 | print()
42 |
43 | x = cuda.to_device(x)
44 | y = cuda.to_device(y)
45 |
46 |     # add in-place
47 | # pre compile
48 | add_inplace[blocks_per_grid, threads_per_block](x, y)
49 |
50 | t1 = time.time()
51 | for i in range(NUM_TESTS):
52 | add_inplace[blocks_per_grid, threads_per_block](x, y)
53 |
54 | x = x.copy_to_host()
55 | t2 = time.time()
56 |
57 | print("Time inplace : ", (t2 - t1) / float(NUM_TESTS))
58 | print()
59 |
60 |     # add into external buffer
61 | z_buffer = cuda.to_device(z_buffer)
62 | x_copy = cuda.to_device(x_copy)
63 |
64 | # pre compile
65 | add_external[blocks_per_grid, threads_per_block](x_copy, y, z_buffer)
66 |
67 | t1 = time.time()
68 | for i in range(NUM_TESTS):
69 | add_external[blocks_per_grid, threads_per_block](x_copy, y, z_buffer)
70 |
71 | z_buffer = z_buffer.copy_to_host()
72 | t2 = time.time()
73 | print("Time buffer : ", (t2 - t1) / float(NUM_TESTS))
74 |
75 | cuda.synchronize()
76 | cuda.close()
--------------------------------------------------------------------------------
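
Both kernels compute a plain elementwise sum, so their output can be checked against NumPy on a small array. The following sketch is not part of the repo; it assumes a CUDA-capable GPU and that cuda_add.py is importable (its benchmark only runs under __main__, so importing it only triggers kernel compilation):

    # Hypothetical host-side verification of add_external (illustrative).
    import numpy as np
    from numba import cuda
    from cuda_add import add_external

    x = np.random.rand(256, 256).astype(np.float32)
    y = np.random.rand(256, 256).astype(np.float32)
    z = np.zeros_like(x)

    threads_per_block = (16, 16)
    blocks_per_grid = (x.shape[0] // threads_per_block[0] + 1,
                       x.shape[1] // threads_per_block[1] + 1)

    d_x, d_y, d_z = cuda.to_device(x), cuda.to_device(y), cuda.to_device(z)
    add_external[blocks_per_grid, threads_per_block](d_x, d_y, d_z)

    np.testing.assert_allclose(d_z.copy_to_host(), x + y, rtol=1e-6)
    print("GPU elementwise add matches NumPy")
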
/GifToPNG/convert.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import sys
3 | import os
4 | import glob
5 |
6 | def process_gif(infile, outdir):
7 | try:
8 | im = Image.open(infile)
9 | except IOError:
10 |         print("Can't load", infile)
11 | sys.exit(1)
12 |
13 | i = 0
14 | mypalette = im.getpalette()
15 |
16 | filepath, filename = os.path.split(infile)
17 |     filebase, exts = os.path.splitext(filename)
18 |     print("Processing: " + infile, filebase)
19 |
20 | outpath = os.path.join(filepath, outdir, filename)
21 |
22 | if not os.path.exists(outpath):
23 | os.makedirs(outpath)
24 |
25 | try:
26 | while 1:
27 | #im.putpalette(mypalette)
28 | new_im = Image.new("RGBA", im.size)
29 | new_im.paste(im)
30 | new_im.save(os.path.join(outpath, 'frame_' + str(i) + '.png'))
31 |
32 | i += 1
33 | im.seek(im.tell() + 1)
34 |
35 | except EOFError:
36 | pass # end of sequence
37 |
38 | def process_gifs(indir, outdir):
39 | files = glob.glob(os.path.join(indir, '*.gif'))
40 |
41 | for file in files:
42 | process_gif(file, outdir)
43 |
44 |
45 | if __name__ == "__main__":
46 | dir = r""
47 |
48 | process_gifs(dir, outdir='Images')
--------------------------------------------------------------------------------
/GooglePlayMusic/GPM1/AnalyzeMusic.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from sklearn.preprocessing import LabelEncoder
3 | from GooglePlayMusic.GPM1.DatabaseManager import GPMDBManager
4 |
5 | class MultiColumnLabelEncoder:
6 | def __init__(self,columns = None):
7 | self.columns = columns # array of column names to encode
8 |
9 | def fit(self,X,y=None):
10 | return self # not relevant here
11 |
12 | def transform(self,X):
13 | '''
14 | Transforms columns of X specified in self.columns using
15 | LabelEncoder(). If no columns specified, transforms all
16 | columns in X.
17 | '''
18 | output = X.copy()
19 | if self.columns is not None:
20 | for col in self.columns:
21 | output[col] = LabelEncoder().fit_transform(output[col])
22 | else:
23 | for colname,col in output.iteritems():
24 | output[colname] = LabelEncoder().fit_transform(col)
25 | return output
26 |
27 | def fit_transform(self,X,y=None):
28 | return self.fit(X,y).transform(X)
29 |
30 | if __name__ == "__main__":
31 | import sqlite3 as sql
32 |
33 | df = pd.read_csv("music.csv", header=0)
34 |
35 | df["SongTotal"] = df["songDurationMillis"] * df["songPlayCount"]
36 |
37 | thresholdCount = 25
38 | df["SongInBest"] = 0
39 | df.loc[(df.songPlayCount > thresholdCount), "SongInBest"] = 1
40 |
41 | print(df.info(), "\n", df.describe())
42 |
43 | timeInMillis = df.SongTotal.sum()
44 | print("Total Time listened to songs (in Minutes): ", timeInMillis / 1000 / 60)
45 | print("Total Time listened to songs (in Hours): ", timeInMillis / 1000 / 60 / 60)
46 |
47 | #con = sql.connect("GPM.db")
48 |
49 | #df.to_sql("MusicTable", con)
50 | #df.to_hdf(r"D:\Users\Yue\PycharmProjects\Python-Work\GooglePlayMusic\GPM.h5", "MusicData")
51 |
--------------------------------------------------------------------------------
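
The transform docstring above describes per-column label encoding. A toy demonstration with made-up data, assuming the GooglePlayMusic package (and its gmusicapi dependency, pulled in at module import) is importable:

    # Hypothetical usage of MultiColumnLabelEncoder (illustrative data).
    import pandas as pd
    from GooglePlayMusic.GPM1.AnalyzeMusic import MultiColumnLabelEncoder

    df = pd.DataFrame({
        "songArtist": ["Taku Iwasaki", "Yuki Kajiura", "Taku Iwasaki"],
        "songAlbum": ["Ragnarok Online BGM", "Tsubasa Chronicles", "Ragnarok Online BGM"],
    })

    encoded = MultiColumnLabelEncoder(columns=["songArtist"]).fit_transform(df)
    print(encoded)  # songArtist becomes integer codes; songAlbum is untouched
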
/GooglePlayMusic/GPM1/BestGPM.py:
--------------------------------------------------------------------------------
1 | from GooglePlayMusic.GPM1.DatabaseManager import GPMDBManager
2 | import pandas as pd
3 |
4 | if __name__ == "__main__":
5 | db = GPMDBManager()
6 |
7 | all_songs = """SELECT * FROM Songs WHERE (songPlayCount > 0) ORDER BY songPlayCount DESC"""
8 | all_songs_from_ragnarok = """SELECT * FROM Songs WHERE (songPlayCount > 0) AND (songAlbum = 'Ragnarok Online BGM') ORDER BY songPlayCount DESC"""
9 |
10 | top_k = 20
11 | top_k_best_songs = """SELECT * FROM Songs WHERE (songPlayCount > 0) ORDER BY songPlayCount DESC LIMIT """ + str(top_k)
12 |
13 | all_tsubasa_songs = """SELECT * FROM Songs WHERE (songPlayCount > 0) AND (songAlbum = 'Tsubasa Chronicles 2' OR songAlbum = 'Tsubasa Chronicles') ORDER BY songPlayCount DESC"""
14 | best_taku_iwasaki = """SELECT * FROM Songs WHERE (songPlayCount > 0) AND (songArtist = 'Taku Iwasaki') ORDER BY songPlayCount DESC"""
15 |
16 | group_by_artist = """SELECT songArtist, Count(songArtist) FROM Songs GROUP BY songArtist ORDER BY Count(songArtist) DESC"""
17 |
18 | """
19 | select = all_songs
20 | cursor = db.conn.cursor()
21 | bestSongs = cursor.execute(select)
22 |
23 | print("Song Name, Album, Artist, Play Count\n")
24 | for song in bestSongs:
25 | print(*[song[0], song[1], song[2], song[4]], sep=", ")
26 | """
27 |
28 | """
29 | select = all_songs_from_ragnarok
30 | cursor = db.conn.cursor()
31 | bestSongs = cursor.execute(select)
32 |
33 | print("Song Name, Album, Artist, Play Count\n")
34 | for song in bestSongs:
35 | print(*[song[0], song[1], song[2], song[4]], sep=", ")
36 | """
37 |
38 | """
39 | select = top_k_best_songs
40 | cursor = db.conn.cursor()
41 | bestSongs = cursor.execute(select)
42 |
43 | print("Song Name, Album, Artist, Play Count\n")
44 | for song in bestSongs:
45 | print(*[song[0], song[1], song[2], song[4]], sep=", ")
46 | """
47 |
48 | """
49 | select = all_tsubasa_songs
50 | cursor = db.conn.cursor()
51 | bestSongs = cursor.execute(select)
52 |
53 | print("Song Name, Album, Artist, Play Count\n")
54 | for song in bestSongs:
55 | print(*[song[0], song[1], song[2], song[4]], sep=", ")
56 | """
57 |
58 | """
59 | select = best_taku_iwasaki
60 | cursor = db.conn.cursor()
61 | bestSongs = cursor.execute(select)
62 |
63 | print("Song Name, Album, Artist, Play Count\n")
64 | for song in bestSongs:
65 | print(*[song[0], song[1], song[2], song[4]], sep=", ")
66 | """
67 |
68 | select = group_by_artist
69 | cursor = db.conn.cursor()
70 | bestSongs = cursor.execute(select)
71 |
72 | print("Artist, No of Songs by Artist\n")
73 | for song in bestSongs:
74 | print(*[song[0], song[1]], sep=", ")
75 |
--------------------------------------------------------------------------------
/GooglePlayMusic/GPM1/DatabaseManager.py:
--------------------------------------------------------------------------------
1 | import sqlite3
2 | from gmusicapi import Mobileclient
3 |
4 | class GPMDBManager:
5 |
6 | def __init__(self):
7 | self.dbName = "GPM.db"
8 |
9 | # Table Songs
10 | self.tableSongs = "Songs"
11 |
12 | # Column for Table Songs
13 | self.colSongName = "songName"
14 | self.colSongAlbum = "songAlbum"
15 | self.colSongArtist = "songArtist"
16 | self.colSongDurationMillis = "songDurationMillis"
17 | self.colSongPlayCount = "songPlayCount"
18 | self.colSongRating = "songRating"
19 | self.colSongComposer = "songComposer"
20 | self.colSongYear = "songYear"
21 |
22 | __CREATE_TABLE_SONGS = """CREATE TABLE IF NOT EXISTS Songs (songName TEXT, songAlbum TEXT,
23 | songArtist TEXT, songDurationMillis INTEGER, songPlayCount INTEGER,
24 | songRating TEXT, songComposer TEXT, songYear INTEGER)"""
25 |
26 | self.conn = None
27 |
28 | self.connect()
29 | c = self.conn.cursor()
30 | c.execute(__CREATE_TABLE_SONGS)
31 |
32 | def connect(self):
33 | if self.conn is None:
34 | self.conn = sqlite3.connect(self.dbName)
35 | #self.conn.text_factory = lambda x: str(x, "utf-16")
36 |
37 | def insertSong(self, songName, songAlbum, songArtist, songDuration, songCount, songRating, songComposer, songYear):
38 | INSERT_SONG = """INSERT INTO Songs VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
39 |
40 | c = self.conn.cursor()
41 | c.execute(INSERT_SONG, (songName, songAlbum, songArtist, songDuration, songCount, songRating, songComposer, songYear))
42 | self.conn.commit()
43 |
44 | def selectSongByName(self, songName):
45 | SELECT_BY_NAME = """SELECT * FROM Songs WHERE (songName=?) LIMIT 1"""
46 |
47 | c = self.conn.cursor()
48 | return c.execute(SELECT_BY_NAME, (songName,)).fetchone()
49 |
50 | def selectSongOrderByPlayCount(self, limit=None):
51 | if limit is None:
52 | SELECT_ORDER_PLAY_COUNT = """SELECT * FROM Songs ORDER BY songPlayCount DESC"""
53 | else:
54 | SELECT_ORDER_PLAY_COUNT = "SELECT * FROM Songs ORDER BY songPlayCount DESC LIMIT " + str(limit)
55 |
56 | c = self.conn.cursor()
57 | return c.execute(SELECT_ORDER_PLAY_COUNT)
58 |
59 | def selectSongsHeardBefore(self):
60 | SELECT = """SELECT * FROM Songs WHERE (songPlayCount > 0) ORDER BY songPlayCount DESC"""
61 | c = self.conn.cursor()
62 | return c.execute(SELECT)
63 |
64 | def deleteAllSongs(self):
65 | TRUNCATE_SONGS = """DELETE FROM Songs"""
66 | self.conn.cursor().execute(TRUNCATE_SONGS)
67 |
68 | def close(self):
69 | self.conn.close()
70 | self.conn = None
71 |
72 | if __name__ == "__main__":
73 | manager = GPMDBManager()
74 |
75 | """
76 | Load all songs into db (Use ONLY ONCE)
77 | """
78 |
79 | ANDROID_DEVICE_MAC_ADDRESS = "00:00:00:00:00:00"
80 |
81 | client = Mobileclient()
82 | client.login("abc@gmail.com", "xyz", ANDROID_DEVICE_MAC_ADDRESS)
83 |
84 | print("Getting songs")
85 |
86 | library = client.get_all_songs()
87 | print("Retrieved all songs")
88 |
89 | for i, songDict in enumerate(library):
90 | name = songDict["title"]
91 | album = songDict["album"]
92 | artist = songDict["artist"]
93 | duration = songDict["durationMillis"]
94 | playCount = songDict["playCount"]
95 | songRating = songDict["rating"]
96 | songComposer = songDict["composer"]
97 | songYear = songDict["year"]
98 |
99 | manager.insertSong(name, album, artist, duration, playCount, songRating, songComposer, songYear)
100 | print("Inserted Song %d : Name = %s" % ((i+1), name))
101 |
102 |
103 | manager.close()
104 |
--------------------------------------------------------------------------------
/GooglePlayMusic/GPM1/GPM.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/GooglePlayMusic/GPM1/GPM.db
--------------------------------------------------------------------------------
/GooglePlayMusic/gpm_serialize.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from gmusicapi import Mobileclient
3 | import json
4 |
5 | client = Mobileclient()
6 | # client.perform_oauth()
7 |
8 | client.oauth_login(Mobileclient.FROM_MAC_ADDRESS)
9 |
10 | songs = client.get_all_songs()
11 |
12 | with open('dataset.json', 'w') as f:
13 | json.dump(songs, f, indent=4)
14 |
15 | print("Serialized song data")
--------------------------------------------------------------------------------
/GooglePlayMusic/process_data.py:
--------------------------------------------------------------------------------
1 | import json
2 | from tqdm import tqdm
3 | from dataclasses import dataclass, field
4 | import numpy as np
5 | from pprint import pprint
6 |
7 | import matplotlib.pyplot as plt
8 |
9 | with open('dataset.json', 'r') as f:
10 | dataset = json.load(f)
11 |
12 |
13 | @dataclass(order=True, repr=False)
14 | class MusicAttributes:
15 | sort_index: int = field(init=False)
16 | title: str
17 | artist: str
18 | composer: str
19 | album: str
20 | albumArtist: str
21 | year: int
22 | durationMillis: int
23 | playCount: int
24 | totalPlaytime: int = field(init=False)
25 |
26 | def __post_init__(self):
27 | self.totalPlaytime = self.durationMillis * self.playCount
28 | self.sort_index = self.totalPlaytime
29 |
30 | def total_playtime(self, return_str=True):
31 | duration = self.durationMillis * self.playCount // 1000
32 | seconds = round(duration) % 60
33 | minutes = round(duration // 60) % 60
34 | hours = round(duration // 60 // 60)
35 |
36 | if return_str:
37 | duration_str = f"Total Playtime ={hours:4d}h:{minutes:1d}m:{seconds:2d}s"
38 | return duration_str
39 | else:
40 | return duration
41 |
42 | def __str__(self):
43 | duration = self.durationMillis / 1000.
44 | seconds = round(duration) % 60
45 | minutes = round(duration // 60)
46 | duration_str = f"{minutes:1d}m:{seconds:2d}s"
47 |
48 | result = f"[ {self.title} ] (Count: {self.playCount}) - Duration = {duration_str} --- " \
49 | f"Artist = '{self.artist}' Album = '{self.album}'"
50 |
51 | return result
52 |
53 |
54 | def parse_record(record: dict) -> MusicAttributes:
55 | attribute = MusicAttributes(
56 | title=record['title'],
57 | artist=record['artist'],
58 | composer=record.get('composer', ''),
59 | album=record['album'],
60 | albumArtist=record['albumArtist'],
61 | year=record.get('year', 0),
62 | durationMillis=int(record['durationMillis']),
63 | playCount=record.get('playCount', 0),
64 | )
65 |
66 | return attribute
67 |
68 |
69 | records = []
70 | for record in tqdm(dataset, total=len(dataset)):
71 | result = parse_record(record)
72 |
73 | if result is not None:
74 | records.append(result)
75 |
76 |
77 | records = sorted(records, reverse=True)  # type: list[MusicAttributes]
78 |
79 | # for ix, record in enumerate(records[:50]):
80 | # print(ix + 1, record, "|", record.total_playtime())
81 |
82 | total_durations = [record.total_playtime(return_str=False) for record in records]
83 |
84 | plt.plot(total_durations[:100])
85 | plt.xlabel('Song id')
86 | plt.ylabel('Total playtime in seconds')
87 | plt.show()
88 |
89 |
90 |
--------------------------------------------------------------------------------
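
MusicAttributes relies on the dataclass(order=True) idiom: comparisons use fields in declaration order, so the init=False sort_index assigned in __post_init__ is what makes sorted(records, reverse=True) rank songs by total playtime. A minimal standalone sketch of the same pattern, with illustrative names:

    # Sketch of the sort_index idiom (names are illustrative, not from the repo).
    from dataclasses import dataclass, field

    @dataclass(order=True)
    class Track:
        sort_index: int = field(init=False)
        title: str = field(compare=False)
        play_count: int = field(compare=False)

        def __post_init__(self):
            # The only comparable field, so it alone decides the ordering.
            self.sort_index = self.play_count

    tracks = [Track("a", 3), Track("b", 10), Track("c", 1)]
    print([t.title for t in sorted(tracks, reverse=True)])  # ['b', 'a', 'c']
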
/KerasTools/count_metrics.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from keras import backend as K
3 | from keras.applications.mobilenet import MobileNet
4 |
5 | run_metadata = tf.RunMetadata()
6 |
7 | with tf.Session(graph=tf.Graph()) as sess:
8 | K.set_session(sess)
9 |
10 | model = MobileNet(alpha=1.0, weights=None, input_tensor=tf.placeholder('float32', shape=(1, 224, 224, 3)))
11 |
12 | opt = tf.profiler.ProfileOptionBuilder.float_operation()
13 | flops = tf.profiler.profile(sess.graph, run_meta=run_metadata, cmd='op_name', options=opt)
14 |
15 | opt = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
16 | param_count = tf.profiler.profile(sess.graph, run_meta=run_metadata, cmd='op_name', options=opt)
17 |
18 | print('flops:', flops.total_float_ops)
19 | print('param count:', param_count.total_parameters)
20 |
--------------------------------------------------------------------------------
/NeuralNetRegression.py:
--------------------------------------------------------------------------------
1 | from sklearn.datasets import load_boston
2 | import sklearn.metrics as metrics
3 | import keras.layers.core as core
4 | import keras.models as models
5 | from sklearn.linear_model import LinearRegression
6 | from keras.callbacks import EarlyStopping
7 |
8 |
9 | if __name__ == "__main__":
10 |
11 | boston = load_boston()
12 | X = boston.data
13 | y = boston.target
14 | nEpochs = 200
15 |
16 | model = models.Sequential()
17 | model.add(core.Dense(1000, activation="relu", input_shape=(13,))) # 200
18 | model.add(core.Dropout(0.2))
19 | model.add(core.Dense(1000, activation="relu")) # 1000
20 | model.add(core.Dropout(0.2))
21 | model.add(core.Dense(1000, activation="relu")) # 1000
22 | model.add(core.Dropout(0.2))
23 | model.add(core.Dense(75, activation="relu")) # 200
24 | model.add(core.Dense(1))
25 |
26 | model.summary()
27 |
28 |     model.compile(loss="mse", optimizer="adam")  # "diff" is not a Keras loss; MSE matches the evaluation below
29 | #callbacks=[EarlyStopping( patience=10)]
30 | print("NN : Begin Fitting")
31 | model.fit(X, y, nb_epoch=nEpochs, verbose=1, validation_split=0.00, )
32 |
33 | yPreds = model.predict(X)
34 | mse = metrics.mean_squared_error(y, yPreds)
35 |
36 | print("Neural Network : Mean Squared Error : ", mse)
37 |
38 | lr = LinearRegression()
39 | lr.fit(X, y)
40 |
41 | lrYPred = lr.predict(X)
42 | lrmse = metrics.mean_squared_error(y, lrYPred)
43 |
44 | print("Linear Regression : Mean Squared Error : ", lrmse)
45 |
46 |     if lrmse >= mse: print("Neural Network MSE <= Linear Regression MSE. Better than standard LR")
47 |     else: print("Neural Network MSE > Linear Regression MSE. Worse than standard LR")
48 |
49 | print("yLAST NN : ", yPreds[-1][0], " yLASTLR : ", lrYPred[-1],)
50 |     print("NN last-sample error = ", -(y[-1] - yPreds[-1]), " LR last-sample error = ", -(y[-1] - lrYPred[-1]))
--------------------------------------------------------------------------------
/NeuralNetworks/ELU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/NeuralNetworks/ELU.png
--------------------------------------------------------------------------------
/NeuralNetworks/SpiralGenerator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 |
4 | def randUniform(a, b):
5 | return np.random.random_sample() * (b - a) + a
6 |
7 | def generateSpiralDataset(numSamples, noise=0.) -> np.ndarray:
8 | n = int(numSamples / 2)
9 | points = []
10 |
11 | def spiral(deltaT, label):
12 | for i in range(n):
13 | r = i / n * 5
14 | t = 1.75 * i / n * 2 * np.pi + deltaT
15 | x = r * np.sin(t) + randUniform(-1, 1) * noise
16 | y = r * np.cos(t) + randUniform(-1, 1) * noise
17 | points.append((x, y, label))
18 |
19 | spiral(0, 1) # Positive Samples
20 | spiral(np.pi, -1) # Negative Samples
21 | return np.array(points)
22 |
23 | def generateSpiralDataframe(data:np.array) -> pd.DataFrame:
24 | df = pd.DataFrame({'label': data[:, 2], 'x': data[:, 0], 'y': data[:, 1]})
25 |
26 | df["x2"] = df["x"] ** 2
27 | df["y2"] = df["y"] ** 2
28 | df["xy"] = df["x"] * df["y"]
29 |
30 | df["sinx"] = np.sin(df["x"])
31 | df["siny"] = np.sin(df["y"])
32 |
33 | return df
34 |
35 | if __name__ == "__main__":
36 | import seaborn as sns
37 | sns.set_style("white")
38 |
39 | count = 1000
40 | points = generateSpiralDataset(count, noise=0.3)
41 | values = generateSpiralDataframe(points).values
42 |
43 |     sns.plt.scatter(values[:count // 2, 1], values[:count // 2, 2], c="r")  # // keeps the index an int under Python 3
44 |     sns.plt.scatter(values[count // 2:, 1], values[count // 2:, 2], c="b")
45 | sns.plt.show()
--------------------------------------------------------------------------------
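
Note that sns.plt only exists on old seaborn releases, which exposed matplotlib.pyplot as a module attribute; current versions raise AttributeError. An equivalent sketch that imports pyplot directly, assuming SpiralGenerator.py is importable:

    # Sketch using matplotlib.pyplot instead of the legacy sns.plt attribute.
    import matplotlib.pyplot as plt
    import seaborn as sns
    from SpiralGenerator import generateSpiralDataset, generateSpiralDataframe

    sns.set_style("white")

    count = 1000
    values = generateSpiralDataframe(generateSpiralDataset(count, noise=0.3)).values

    half = count // 2  # first half holds the positive-label points, second half the negative
    plt.scatter(values[:half, 1], values[:half, 2], c="r")
    plt.scatter(values[half:, 1], values[half:, 2], c="b")
    plt.show()
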
/NeuralNetworks/SpiralNNClassifier.py:
--------------------------------------------------------------------------------
1 | import keras.layers.core as core
2 | import keras.models as models
3 | import keras.callbacks as callbacks
4 | import keras.utils.np_utils as kutils
5 |
6 | from sklearn.cross_validation import train_test_split
7 |
8 | from NeuralNetworks.SpiralGenerator import generateSpiralDataframe, generateSpiralDataset
9 |
10 | if __name__ == "__main__":
11 |
12 | points = generateSpiralDataset(numSamples=100, noise=0.3)
13 | df = generateSpiralDataframe(points)
14 |
15 | train, test = train_test_split(df, train_size=0.7)
16 | #train = train[["label", "x", "y"]]
17 | train = train.values
18 |
19 | #test = test[["label", "x", "y"]]
20 | test = test.values
21 |
22 | trainX = train[:, 1:]
23 |     trainY = (train[:, 0] + 1) // 2  # remap labels {-1, 1} to {0, 1}; to_categorical cannot one-hot encode -1
24 |     trainY = kutils.to_categorical(trainY)
25 |
26 | testX = test[:, 1:]
27 |     testY = (test[:, 0] + 1) // 2  # same remapping for the test split
28 | testY = kutils.to_categorical(testY)
29 |
30 | # Variables
31 | nbFeatures = trainX.shape[1]
32 | nbClasses = trainY.shape[1]
33 |
34 | batchSize = 32
35 | epochs = 100
36 |
37 | model = models.Sequential()
38 |
39 | model.add(core.Dense(8, input_shape=(nbFeatures,), activation="relu"))
40 | model.add(core.Dense(8, activation="relu"))
41 |
42 | model.add(core.Dense(nbClasses, activation="softmax"))
43 |
44 | model.compile(optimizer='adadelta', loss="binary_crossentropy", metrics=["accuracy"])
45 |
46 | model.fit(trainX, trainY, batch_size=batchSize, nb_epoch=epochs, validation_data=(testX, testY))
47 |
48 |
--------------------------------------------------------------------------------
/NotificationTest.py:
--------------------------------------------------------------------------------
1 |
2 | from win32gui import *
3 | import win32con
4 | import sys, os
5 | import time
6 |
7 | class WindowsBalloonTip:
8 | def __init__(self, title, msg):
9 | message_map = {
10 | win32con.WM_DESTROY: self.OnDestroy,
11 | }
12 | # Register the Window class.
13 | wc = WNDCLASS()
14 | hinst = wc.hInstance = GetModuleHandle(None)
15 | wc.lpszClassName = "PythonTaskbar"
16 | wc.lpfnWndProc = message_map # could also specify a wndproc.
17 | classAtom = RegisterClass(wc)
18 | # Create the Window.
19 | style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
20 | self.hwnd = CreateWindow( classAtom, "Taskbar", style, \
21 | 0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
22 | 0, 0, hinst, None)
23 | UpdateWindow(self.hwnd)
24 | iconPathName = os.path.abspath(os.path.join( sys.path[0], "balloontip.ico" ))
25 | icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
26 | try:
27 | hicon = LoadImage(hinst, iconPathName, \
28 | win32con.IMAGE_ICON, 0, 0, icon_flags)
29 | except:
30 | hicon = LoadIcon(0, win32con.IDI_APPLICATION)
31 | flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
32 | nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, "tooltip")
33 | Shell_NotifyIcon(NIM_ADD, nid)
34 | Shell_NotifyIcon(NIM_MODIFY, \
35 | (self.hwnd, 0, NIF_INFO, win32con.WM_USER+20,\
36 | hicon, "Balloon tooltip",msg,200,title))
37 | # self.show_balloon(title, msg)
38 | time.sleep(10)
39 | DestroyWindow(self.hwnd)
40 | def OnDestroy(self, hwnd, msg, wparam, lparam):
41 | nid = (self.hwnd, 0)
42 | Shell_NotifyIcon(NIM_DELETE, nid)
43 | PostQuitMessage(0) # Terminate the app.
44 |
45 | def balloon_tip(title, msg):
46 |     w = WindowsBalloonTip(title, msg)
47 |
48 | if __name__ == "__main__":
49 | balloon_tip("Somshubra", "Hello World")
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/AnalyzeResults.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import json
4 | import seaborn as sns
5 | sns.set_style("whitegrid")
6 |
7 | basepath = r"D:/Users/Yue/PycharmProjects/Python-Work/Parallel Quick Sort Results"
8 |
9 | # Paths
10 | somPath = r"/Som-Data/*.json"
11 | ishaanPath = r"/Ishaan-Data/*.json"
12 | javaPath = r"/Java-Data/*.json"
13 |
14 | def analyze(longpath):
15 | path = basepath + longpath
16 | files = glob.glob(path)
17 |
18 | counter = 0
19 |
20 | y1 = []
21 | y2 = []
22 |
23 | for pth in files:
24 | counter += 1
25 |
26 | with open(pth, "r") as f:
27 | for line in f:
28 | js = json.loads(line)
29 |                 percentGain = js["percentageGain"]
30 |                 if counter % 2 == 1: y1.append(percentGain)
31 |                 else: y2.append(percentGain)
32 |
33 | if counter % 2 == 0:
34 | x = [i for i in range(1, len(y1) + 1)]
35 | name = os.path.basename(pth).split(".")[0]
36 |
37 | plot = sns.plt.plot(x, y1, "b", x, y2, "y")
38 | sns.plt.xlabel("Dataset Number")
39 | sns.plt.ylabel("Percent Gain")
40 |
41 | sns.plt.savefig(name + " Image.png")
42 |
43 | sns.plt.clf()
44 |
45 | y1.clear()
46 | y2.clear()
47 |
48 | def analyze350M():
49 | path = r"D:\Users\Yue\PycharmProjects\Python-Work\Parallel Quick Sort Results\Som-Data\Result 350000000 Type 1.json"
50 |
51 | counter = 0
52 |
53 | y1 = []
54 |
55 | with open(path, "r") as f:
56 | for line in f:
57 | js = json.loads(line)
58 |             percentGain = js["percentageGain"]
59 |             y1.append(percentGain)
60 |
61 | x = [i for i in range(1, len(y1) + 1)]
62 | name = os.path.basename(path).split(".")[0]
63 |
64 | plot = sns.plt.plot(x, y1, "b")
65 | sns.plt.xlabel("Dataset Number")
66 | sns.plt.ylabel("Percent Gain")
67 | sns.plt.ylim([0,400])
68 |
69 | sns.plt.savefig(name + " Image.png")
70 |
71 |
72 | def analyzeJava(longpath):
73 | path = basepath + longpath
74 | files = glob.glob(path)
75 |
76 | counter = 0
77 |
78 | y1 = []
79 | y2 = []
80 |
81 | for pth in files:
82 | counter += 1
83 |
84 | with open(pth, "r") as f:
85 | for line in f:
86 | js = json.loads(line)
87 |                 sortTimeMs = js["arraySortTotalTime"]  # total sort time in ms, not a gain
88 |                 if counter % 2 == 1: y1.append(sortTimeMs)
89 |                 else: y2.append(sortTimeMs)
90 |
91 | if counter % 2 == 0:
92 | x = [i for i in range(1, len(y1) + 1)]
93 | name = os.path.basename(pth).split(".")[0]
94 |
95 | plot = sns.plt.plot(x, y1, "b", x, y2, "y")
96 | sns.plt.xlabel("Dataset Number")
97 | sns.plt.ylabel("Total Sorting Time (in Milliseconds)")
98 | sns.plt.ylim(0)
99 | sns.plt.savefig(name + " Image.png")
100 |
101 | sns.plt.clf()
102 |
103 | y1.clear()
104 | y2.clear()
105 |
106 |
107 |
108 | if __name__ == "__main__":
109 | #analyze(somPath)
110 | #analyze(ishaanPath)
111 | #analyzeJava(javaPath)
112 | #analyze350M()
113 | pass
--------------------------------------------------------------------------------
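
Each result file is newline-delimited JSON, one run per line (see the dumps below), so the manual json.loads loop above can be collapsed into a single pandas call. A sketch with an illustrative path; any file under Som-Data, Ishaan-Data or Java-Data works, though the Java files omit percentageGain:

    # Sketch: load one newline-delimited result file with pandas (illustrative).
    import pandas as pd

    df = pd.read_json("Som-Data/Result 1000000 Type 1.json", lines=True)
    print(df["percentageGain"].describe())
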
/Parallel Quick Sort Results/DotConvert.py:
--------------------------------------------------------------------------------
1 | from subprocess import check_call
2 |
3 | check_call(["dot", "-Tpng", "Results.dot", "-o", "Results.png"])
4 | print("Conversion complete")
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Ishaan-Data/Result 100000000 Type 1.json:
--------------------------------------------------------------------------------
1 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4884,"arraySortTotalTime":11956,"totalMemoryConsumed":385,"gainInMilliseconds":7072,"percentageGain":144.7993447993448,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
2 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4398,"arraySortTotalTime":11317,"totalMemoryConsumed":382,"gainInMilliseconds":6919,"percentageGain":157.32150977717143,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
3 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4606,"arraySortTotalTime":11279,"totalMemoryConsumed":382,"gainInMilliseconds":6673,"percentageGain":144.87624837168912,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
4 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4414,"arraySortTotalTime":11218,"totalMemoryConsumed":382,"gainInMilliseconds":6804,"percentageGain":154.14589941096511,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
5 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4378,"arraySortTotalTime":11327,"totalMemoryConsumed":382,"gainInMilliseconds":6949,"percentageGain":158.7254454088625,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
6 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4439,"arraySortTotalTime":11233,"totalMemoryConsumed":382,"gainInMilliseconds":6794,"percentageGain":153.05248929939174,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
7 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4378,"arraySortTotalTime":11330,"totalMemoryConsumed":382,"gainInMilliseconds":6952,"percentageGain":158.79396984924622,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
8 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4533,"arraySortTotalTime":11262,"totalMemoryConsumed":382,"gainInMilliseconds":6729,"percentageGain":148.44473858371938,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
9 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4441,"arraySortTotalTime":11193,"totalMemoryConsumed":382,"gainInMilliseconds":6752,"percentageGain":152.03782931772122,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
10 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4386,"arraySortTotalTime":11215,"totalMemoryConsumed":382,"gainInMilliseconds":6829,"percentageGain":155.6999544003648,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
11 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Ishaan-Data/Result 100000000 Type 2.json:
--------------------------------------------------------------------------------
1 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4495,"arraySortTotalTime":-1372,"totalMemoryConsumed":385,"gainInMilliseconds":-5867,"percentageGain":-130.52280311457173,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
2 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4381,"arraySortTotalTime":-1427,"totalMemoryConsumed":382,"gainInMilliseconds":-5808,"percentageGain":-132.5724720383474,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
3 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4333,"arraySortTotalTime":-1346,"totalMemoryConsumed":384,"gainInMilliseconds":-5679,"percentageGain":-131.06392799446112,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
4 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4371,"arraySortTotalTime":-1339,"totalMemoryConsumed":385,"gainInMilliseconds":-5710,"percentageGain":-130.63372226035233,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
5 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4365,"arraySortTotalTime":-1438,"totalMemoryConsumed":388,"gainInMilliseconds":-5803,"percentageGain":-132.9438717067583,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
6 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4280,"arraySortTotalTime":-1342,"totalMemoryConsumed":388,"gainInMilliseconds":-5622,"percentageGain":-131.3551401869159,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
7 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4485,"arraySortTotalTime":-1341,"totalMemoryConsumed":388,"gainInMilliseconds":-5826,"percentageGain":-129.89966555183946,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
8 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4440,"arraySortTotalTime":-1341,"totalMemoryConsumed":388,"gainInMilliseconds":-5781,"percentageGain":-130.2027027027027,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
9 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4571,"arraySortTotalTime":-1340,"totalMemoryConsumed":388,"gainInMilliseconds":-5911,"percentageGain":-129.31524830452855,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
10 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4368,"arraySortTotalTime":-1362,"totalMemoryConsumed":388,"gainInMilliseconds":-5730,"percentageGain":-131.1813186813187,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":true}
11 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Data/Java Result 1000000 Type 1.json:
--------------------------------------------------------------------------------
1 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":147,"totalMemoryConsumed":5}
2 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":111,"totalMemoryConsumed":4}
3 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
4 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
5 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
6 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
7 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
8 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
9 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
10 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
11 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
12 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
13 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
14 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
15 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
16 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
17 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
18 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
19 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":79,"totalMemoryConsumed":4}
20 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
21 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
22 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
23 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":83,"totalMemoryConsumed":4}
24 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
25 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
26 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
27 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":83,"totalMemoryConsumed":4}
28 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
29 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
30 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
31 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
32 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
33 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
34 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
35 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
36 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":79,"totalMemoryConsumed":4}
37 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
38 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":79,"totalMemoryConsumed":4}
39 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
40 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
41 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
42 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
43 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
44 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
45 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
46 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
47 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
48 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
49 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
50 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":83,"totalMemoryConsumed":4}
51 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
52 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
53 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":78,"totalMemoryConsumed":4}
54 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
55 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
56 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
57 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
58 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
59 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
60 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
61 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
62 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
63 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
64 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":83,"totalMemoryConsumed":4}
65 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
66 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":84,"totalMemoryConsumed":4}
67 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":85,"totalMemoryConsumed":4}
68 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
69 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
70 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":84,"totalMemoryConsumed":4}
71 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":84,"totalMemoryConsumed":4}
72 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
73 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
74 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":108,"totalMemoryConsumed":4}
75 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":103,"totalMemoryConsumed":4}
76 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":97,"totalMemoryConsumed":4}
77 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":92,"totalMemoryConsumed":4}
78 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":101,"totalMemoryConsumed":4}
79 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":97,"totalMemoryConsumed":4}
80 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":95,"totalMemoryConsumed":4}
81 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
82 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":83,"totalMemoryConsumed":4}
83 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":88,"totalMemoryConsumed":4}
84 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
85 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":88,"totalMemoryConsumed":4}
86 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
87 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":85,"totalMemoryConsumed":4}
88 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":85,"totalMemoryConsumed":4}
89 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
90 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
91 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
92 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":84,"totalMemoryConsumed":4}
93 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":84,"totalMemoryConsumed":4}
94 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
95 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":80,"totalMemoryConsumed":4}
96 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":84,"totalMemoryConsumed":4}
97 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":102,"totalMemoryConsumed":4}
98 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":94,"totalMemoryConsumed":4}
99 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":82,"totalMemoryConsumed":4}
100 | {"arraySize":1000000,"sortType":1,"arraySortTotalTime":81,"totalMemoryConsumed":4}
101 |
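Note: each of these Java-Data files is newline-delimited JSON, one benchmark run per line, with fields arraySize, sortType (the two values appear to correspond to two different sort configurations; not documented here), arraySortTotalTime (presumably milliseconds) and totalMemoryConsumed (presumably MB). The first record or two in each file run noticeably slower, which looks like JVM warm-up. A minimal sketch for summarizing such a file (not part of the repo; the path, warm-up count, and units are assumptions):

import json
import statistics

def summarize(path, warmup=2):
    # one JSON object per line; skip the trailing blank line
    with open(path) as f:
        records = [json.loads(line) for line in f if line.strip()]
    # drop the first few runs, which appear to be JVM warm-up
    times = [r["arraySortTotalTime"] for r in records[warmup:]]
    return statistics.mean(times), statistics.stdev(times)

mean_ms, std_ms = summarize("Java Result 1000000 Type 2.json")
print(f"mean = {mean_ms:.1f} ms, std = {std_ms:.1f} ms")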
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Data/Java Result 1000000 Type 2.json:
--------------------------------------------------------------------------------
1 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":329,"totalMemoryConsumed":5}
2 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":88,"totalMemoryConsumed":4}
3 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
4 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
5 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":39,"totalMemoryConsumed":4}
6 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
7 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
8 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
9 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
10 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
11 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
12 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
13 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
14 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
15 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
16 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
17 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
18 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":40,"totalMemoryConsumed":4}
19 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
20 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
21 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":43,"totalMemoryConsumed":4}
22 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
23 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":40,"totalMemoryConsumed":4}
24 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":46,"totalMemoryConsumed":4}
25 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
26 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
27 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
28 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
29 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
30 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
31 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
32 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
33 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
34 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":43,"totalMemoryConsumed":4}
35 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
36 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":40,"totalMemoryConsumed":4}
37 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":40,"totalMemoryConsumed":4}
38 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
39 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":39,"totalMemoryConsumed":4}
40 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
41 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
42 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
43 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
44 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
45 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
46 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
47 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":39,"totalMemoryConsumed":4}
48 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
49 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
50 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":35,"totalMemoryConsumed":4}
51 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
52 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
53 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
54 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
55 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
56 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
57 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
58 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
59 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
60 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
61 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
62 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
63 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
64 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
65 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
66 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
67 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":40,"totalMemoryConsumed":4}
68 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
69 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":39,"totalMemoryConsumed":4}
70 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
71 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
72 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
73 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
74 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
75 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
76 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
77 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":39,"totalMemoryConsumed":4}
78 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
79 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
80 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
81 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
82 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
83 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
84 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
85 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
86 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
87 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":38,"totalMemoryConsumed":4}
88 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
89 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":39,"totalMemoryConsumed":4}
90 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
91 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
92 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
93 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":40,"totalMemoryConsumed":4}
94 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
95 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
96 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
97 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
98 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":36,"totalMemoryConsumed":4}
99 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
100 | {"arraySize":1000000,"sortType":2,"arraySortTotalTime":37,"totalMemoryConsumed":4}
101 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Data/Java Result 10000000 Type 1.json:
--------------------------------------------------------------------------------
1 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":971,"totalMemoryConsumed":39}
2 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":979,"totalMemoryConsumed":38}
3 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":932,"totalMemoryConsumed":38}
4 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":934,"totalMemoryConsumed":38}
5 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
6 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
7 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":924,"totalMemoryConsumed":38}
8 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
9 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
10 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":957,"totalMemoryConsumed":38}
11 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":935,"totalMemoryConsumed":38}
12 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":910,"totalMemoryConsumed":38}
13 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":932,"totalMemoryConsumed":38}
14 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":933,"totalMemoryConsumed":38}
15 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
16 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":938,"totalMemoryConsumed":38}
17 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
18 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":938,"totalMemoryConsumed":38}
19 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
20 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":951,"totalMemoryConsumed":38}
21 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":947,"totalMemoryConsumed":38}
22 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
23 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":919,"totalMemoryConsumed":38}
24 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":936,"totalMemoryConsumed":38}
25 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":930,"totalMemoryConsumed":38}
26 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":931,"totalMemoryConsumed":38}
27 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":934,"totalMemoryConsumed":38}
28 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
29 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":972,"totalMemoryConsumed":38}
30 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":988,"totalMemoryConsumed":38}
31 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
32 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":934,"totalMemoryConsumed":38}
33 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":927,"totalMemoryConsumed":38}
34 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":918,"totalMemoryConsumed":38}
35 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":920,"totalMemoryConsumed":38}
36 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
37 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":935,"totalMemoryConsumed":38}
38 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
39 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":935,"totalMemoryConsumed":38}
40 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":920,"totalMemoryConsumed":38}
41 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
42 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":914,"totalMemoryConsumed":38}
43 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":932,"totalMemoryConsumed":38}
44 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":943,"totalMemoryConsumed":38}
45 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":927,"totalMemoryConsumed":38}
46 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":935,"totalMemoryConsumed":38}
47 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":927,"totalMemoryConsumed":38}
48 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
49 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":923,"totalMemoryConsumed":38}
50 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":935,"totalMemoryConsumed":38}
51 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
52 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":935,"totalMemoryConsumed":38}
53 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":930,"totalMemoryConsumed":38}
54 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
55 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
56 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":1016,"totalMemoryConsumed":38}
57 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":1039,"totalMemoryConsumed":38}
58 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":936,"totalMemoryConsumed":38}
59 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":937,"totalMemoryConsumed":38}
60 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":967,"totalMemoryConsumed":38}
61 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
62 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
63 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":922,"totalMemoryConsumed":38}
64 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":938,"totalMemoryConsumed":38}
65 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":935,"totalMemoryConsumed":38}
66 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
67 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
68 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":920,"totalMemoryConsumed":38}
69 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
70 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":936,"totalMemoryConsumed":38}
71 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
72 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":923,"totalMemoryConsumed":38}
73 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":927,"totalMemoryConsumed":38}
74 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
75 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":931,"totalMemoryConsumed":38}
76 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":944,"totalMemoryConsumed":38}
77 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":932,"totalMemoryConsumed":38}
78 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
79 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":920,"totalMemoryConsumed":38}
80 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
81 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":977,"totalMemoryConsumed":38}
82 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":934,"totalMemoryConsumed":38}
83 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":920,"totalMemoryConsumed":38}
84 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":1019,"totalMemoryConsumed":38}
85 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":927,"totalMemoryConsumed":38}
86 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":934,"totalMemoryConsumed":38}
87 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":930,"totalMemoryConsumed":38}
88 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":933,"totalMemoryConsumed":38}
89 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
90 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":923,"totalMemoryConsumed":38}
91 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":925,"totalMemoryConsumed":38}
92 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
93 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":929,"totalMemoryConsumed":38}
94 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
95 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":946,"totalMemoryConsumed":38}
96 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":928,"totalMemoryConsumed":38}
97 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":931,"totalMemoryConsumed":38}
98 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
99 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":926,"totalMemoryConsumed":38}
100 | {"arraySize":10000000,"sortType":1,"arraySortTotalTime":927,"totalMemoryConsumed":38}
101 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Data/Java Result 10000000 Type 2.json:
--------------------------------------------------------------------------------
1 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":651,"totalMemoryConsumed":39}
2 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":469,"totalMemoryConsumed":39}
3 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":440,"totalMemoryConsumed":39}
4 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":448,"totalMemoryConsumed":39}
5 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":442,"totalMemoryConsumed":39}
6 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
7 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":436,"totalMemoryConsumed":39}
8 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
9 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
10 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":437,"totalMemoryConsumed":39}
11 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":449,"totalMemoryConsumed":39}
12 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":428,"totalMemoryConsumed":39}
13 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":436,"totalMemoryConsumed":39}
14 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":434,"totalMemoryConsumed":39}
15 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":451,"totalMemoryConsumed":39}
16 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
17 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":429,"totalMemoryConsumed":39}
18 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":439,"totalMemoryConsumed":39}
19 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":425,"totalMemoryConsumed":39}
20 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":427,"totalMemoryConsumed":39}
21 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":443,"totalMemoryConsumed":39}
22 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":429,"totalMemoryConsumed":39}
23 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":440,"totalMemoryConsumed":39}
24 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":433,"totalMemoryConsumed":39}
25 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":433,"totalMemoryConsumed":39}
26 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":443,"totalMemoryConsumed":39}
27 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":424,"totalMemoryConsumed":39}
28 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":434,"totalMemoryConsumed":39}
29 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":446,"totalMemoryConsumed":39}
30 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":442,"totalMemoryConsumed":39}
31 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":435,"totalMemoryConsumed":39}
32 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":427,"totalMemoryConsumed":39}
33 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":447,"totalMemoryConsumed":39}
34 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":448,"totalMemoryConsumed":39}
35 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":437,"totalMemoryConsumed":39}
36 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":436,"totalMemoryConsumed":39}
37 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":428,"totalMemoryConsumed":39}
38 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":430,"totalMemoryConsumed":39}
39 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":438,"totalMemoryConsumed":39}
40 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":449,"totalMemoryConsumed":39}
41 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":435,"totalMemoryConsumed":39}
42 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":444,"totalMemoryConsumed":39}
43 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":433,"totalMemoryConsumed":39}
44 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":430,"totalMemoryConsumed":39}
45 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":434,"totalMemoryConsumed":39}
46 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":426,"totalMemoryConsumed":39}
47 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
48 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":447,"totalMemoryConsumed":39}
49 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":438,"totalMemoryConsumed":39}
50 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":423,"totalMemoryConsumed":39}
51 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":445,"totalMemoryConsumed":39}
52 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":433,"totalMemoryConsumed":39}
53 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":431,"totalMemoryConsumed":39}
54 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":447,"totalMemoryConsumed":39}
55 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":449,"totalMemoryConsumed":39}
56 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":431,"totalMemoryConsumed":39}
57 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
58 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":447,"totalMemoryConsumed":39}
59 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":433,"totalMemoryConsumed":39}
60 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
61 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":427,"totalMemoryConsumed":39}
62 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":442,"totalMemoryConsumed":39}
63 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":419,"totalMemoryConsumed":39}
64 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":448,"totalMemoryConsumed":39}
65 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":437,"totalMemoryConsumed":39}
66 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":442,"totalMemoryConsumed":39}
67 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":434,"totalMemoryConsumed":39}
68 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":438,"totalMemoryConsumed":39}
69 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":440,"totalMemoryConsumed":39}
70 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":442,"totalMemoryConsumed":39}
71 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":438,"totalMemoryConsumed":39}
72 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":506,"totalMemoryConsumed":39}
73 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":536,"totalMemoryConsumed":39}
74 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":516,"totalMemoryConsumed":39}
75 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":519,"totalMemoryConsumed":39}
76 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":463,"totalMemoryConsumed":39}
77 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":466,"totalMemoryConsumed":39}
78 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":450,"totalMemoryConsumed":39}
79 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":450,"totalMemoryConsumed":39}
80 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":445,"totalMemoryConsumed":39}
81 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":434,"totalMemoryConsumed":39}
82 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":438,"totalMemoryConsumed":39}
83 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":430,"totalMemoryConsumed":39}
84 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":439,"totalMemoryConsumed":39}
85 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":421,"totalMemoryConsumed":39}
86 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":434,"totalMemoryConsumed":39}
87 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":458,"totalMemoryConsumed":39}
88 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":436,"totalMemoryConsumed":39}
89 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":433,"totalMemoryConsumed":39}
90 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":456,"totalMemoryConsumed":39}
91 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":442,"totalMemoryConsumed":39}
92 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":444,"totalMemoryConsumed":39}
93 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":444,"totalMemoryConsumed":39}
94 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":429,"totalMemoryConsumed":39}
95 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":423,"totalMemoryConsumed":39}
96 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":449,"totalMemoryConsumed":39}
97 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":438,"totalMemoryConsumed":39}
98 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
99 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":437,"totalMemoryConsumed":39}
100 | {"arraySize":10000000,"sortType":2,"arraySortTotalTime":441,"totalMemoryConsumed":39}
101 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Data/Java Result 100000000 Type 1.json:
--------------------------------------------------------------------------------
1 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11661,"totalMemoryConsumed":382}
2 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11083,"totalMemoryConsumed":382}
3 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11097,"totalMemoryConsumed":382}
4 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11155,"totalMemoryConsumed":382}
5 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11157,"totalMemoryConsumed":382}
6 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11284,"totalMemoryConsumed":382}
7 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11156,"totalMemoryConsumed":382}
8 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":11012,"totalMemoryConsumed":382}
9 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":10952,"totalMemoryConsumed":382}
10 | {"arraySize":100000000,"sortType":1,"arraySortTotalTime":10916,"totalMemoryConsumed":382}
11 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Data/Java Result 100000000 Type 2.json:
--------------------------------------------------------------------------------
1 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":5685,"totalMemoryConsumed":382}
2 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":4863,"totalMemoryConsumed":382}
3 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":4831,"totalMemoryConsumed":382}
4 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":5018,"totalMemoryConsumed":382}
5 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":4873,"totalMemoryConsumed":382}
6 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":4880,"totalMemoryConsumed":382}
7 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":5034,"totalMemoryConsumed":382}
8 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":4976,"totalMemoryConsumed":382}
9 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":4896,"totalMemoryConsumed":382}
10 | {"arraySize":100000000,"sortType":2,"arraySortTotalTime":5329,"totalMemoryConsumed":382}
11 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Result/Java Result 1000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Java-Result/Java Result 1000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Result/Java Result 10000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Java-Result/Java Result 10000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Result/Java Result 100000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Java-Result/Java Result 100000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Java-Result/Java Result 20000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Java-Result/Java Result 20000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Result-Ishaan/Result 1000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Result-Ishaan/Result 1000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Result-Ishaan/Result 1000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Result-Ishaan/Result 1000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Result-Ishaan/Result 10000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Result-Ishaan/Result 10000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Result-Ishaan/Result 100000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Result-Ishaan/Result 100000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Result-Ishaan/Result 20000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Result-Ishaan/Result 20000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Result-Ishaan/Result 5000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Result-Ishaan/Result 5000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Result-Ishaan/Result 50000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Result-Ishaan/Result 50000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 1000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 1000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 1000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 1000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 10000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 10000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 100000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 100000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 20000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 20000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 350000000 Type 1 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 350000000 Type 1 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 5000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 5000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results-Som/Result 50000000 Type 2 Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results-Som/Result 50000000 Type 2 Image.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results.dot:
--------------------------------------------------------------------------------
1 | digraph J48Tree {
2 | N0 [label="arraysize" ]
3 | N0->N1 [label="<= 100"]
4 | N1 [label="runs" ]
5 | N1->N2 [label="<= 0.806922"]
6 | N2 [label="arraysize" ]
7 | N2->N3 [label="<= 50"]
8 | N3 [label="runs" ]
9 | N3->N4 [label="<= 0.14028"]
10 | N4 [label="Insertion Sort (400.0/1.0)" shape=box style=filled ]
11 | N3->N5 [label="> 0.14028"]
12 | N5 [label="runs" ]
13 | N5->N6 [label="<= 0.25388"]
14 | N6 [label="Shell Sort (100.0/40.0)" shape=box style=filled ]
15 | N5->N7 [label="> 0.25388"]
16 | N7 [label="Merge Sort (100.0/27.0)" shape=box style=filled ]
17 | N2->N8 [label="> 50"]
18 | N8 [label="Insertion Sort (6000.0/27.0)" shape=box style=filled ]
19 | N1->N9 [label="> 0.806922"]
20 | N9 [label="Shell Sort (1100.0/16.0)" shape=box style=filled ]
21 | N0->N10 [label="> 100"]
22 | N10 [label="arraysize" ]
23 | N10->N11 [label="<= 1000"]
24 | N11 [label="runs" ]
25 | N11->N12 [label="<= 0.68816"]
26 | N12 [label="runs" ]
27 | N12->N13 [label="<= 0.22"]
28 | N13 [label="Quick Sort (500.0/254.0)" shape=box style=filled ]
29 | N12->N14 [label="> 0.22"]
30 | N14 [label="Parallel Quick Sort (100.0/27.0)" shape=box style=filled ]
31 | N11->N15 [label="> 0.68816"]
32 | N15 [label="Quick Sort (100.0/17.0)" shape=box style=filled ]
33 | N10->N16 [label="> 1000"]
34 | N16 [label="runs" ]
35 | N16->N17 [label="<= 0.934468"]
36 | N17 [label="arraysize" ]
37 | N17->N18 [label="<= 100000"]
38 | N18 [label="Parallel Merge Sort (900.0/26.0)" shape=box style=filled ]
39 | N17->N19 [label="> 100000"]
40 | N19 [label="runs" ]
41 | N19->N20 [label="<= 0.59"]
42 | N20 [label="runs" ]
43 | N20->N21 [label="<= 0.493"]
44 | N21 [label="Parallel Merge Sort (50.0)" shape=box style=filled ]
45 | N20->N22 [label="> 0.493"]
46 | N22 [label="Parallel Quick Sort (60.0/17.0)" shape=box style=filled ]
47 | N19->N23 [label="> 0.59"]
48 | N23 [label="Parallel Merge Sort (250.0)" shape=box style=filled ]
49 | N16->N24 [label="> 0.934468"]
50 | N24 [label="arraysize" ]
51 | N24->N25 [label="<= 10000"]
52 | N25 [label="Quick Sort (100.0/5.0)" shape=box style=filled ]
53 | N24->N26 [label="> 10000"]
54 | N26 [label="arraysize" ]
55 | N26->N27 [label="<= 500000"]
56 | N27 [label="Parallel Merge Sort (100.0/20.0)" shape=box style=filled ]
57 | N26->N28 [label="> 500000"]
58 | N28 [label="Parallel Quick Sort (10.0)" shape=box style=filled ]
59 | }
60 |
61 |
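Results.dot above is Graphviz source for what appears to be a Weka J48 (C4.5) decision tree that selects the fastest sorting algorithm from the arraysize and runs attributes; Results.png below is presumably its rendering. A minimal sketch for re-rendering it with the graphviz Python package (not part of the repo; the output name is an assumption):

import graphviz

# load the DOT source and render it to a PNG alongside it
with open("Results.dot") as f:
    tree = graphviz.Source(f.read())
tree.render("Results_rerendered", format="png", cleanup=True)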
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/Parallel Quick Sort Results/Results.png
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Som-Data/Result 100000000 Type 1.json:
--------------------------------------------------------------------------------
1 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4274,"arraySortTotalTime":12173,"totalMemoryConsumed":382,"gainInMilliseconds":7899,"percentageGain":184.8151614412728,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
2 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4754,"arraySortTotalTime":11361,"totalMemoryConsumed":382,"gainInMilliseconds":6607,"percentageGain":138.97770298695835,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
3 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4708,"arraySortTotalTime":11619,"totalMemoryConsumed":382,"gainInMilliseconds":6911,"percentageGain":146.7926932880204,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
4 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4751,"arraySortTotalTime":11318,"totalMemoryConsumed":382,"gainInMilliseconds":6567,"percentageGain":138.22353188802356,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
5 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4643,"arraySortTotalTime":11471,"totalMemoryConsumed":382,"gainInMilliseconds":6828,"percentageGain":147.06009045875513,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
6 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4520,"arraySortTotalTime":11244,"totalMemoryConsumed":382,"gainInMilliseconds":6724,"percentageGain":148.76106194690266,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
7 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4610,"arraySortTotalTime":11680,"totalMemoryConsumed":382,"gainInMilliseconds":7070,"percentageGain":153.36225596529286,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
8 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4614,"arraySortTotalTime":11308,"totalMemoryConsumed":382,"gainInMilliseconds":6694,"percentageGain":145.08019072388382,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
9 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4643,"arraySortTotalTime":11379,"totalMemoryConsumed":382,"gainInMilliseconds":6736,"percentageGain":145.0786129657549,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
10 | {"arraySize":100000000,"sortType":1,"parallelQuickSortTotalTime":4691,"arraySortTotalTime":11255,"totalMemoryConsumed":382,"gainInMilliseconds":6564,"percentageGain":139.92752078448092,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
11 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Som-Data/Result 100000000 Type 2.json:
--------------------------------------------------------------------------------
1 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4269,"arraySortTotalTime":5097,"totalMemoryConsumed":382,"gainInMilliseconds":828,"percentageGain":19.395643007730147,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
2 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4146,"arraySortTotalTime":4791,"totalMemoryConsumed":382,"gainInMilliseconds":645,"percentageGain":15.557163531114327,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
3 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4214,"arraySortTotalTime":4864,"totalMemoryConsumed":382,"gainInMilliseconds":650,"percentageGain":15.424774560987187,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
4 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4177,"arraySortTotalTime":4783,"totalMemoryConsumed":382,"gainInMilliseconds":606,"percentageGain":14.508020110126886,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
5 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4096,"arraySortTotalTime":4576,"totalMemoryConsumed":382,"gainInMilliseconds":480,"percentageGain":11.71875,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
6 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":5003,"arraySortTotalTime":5131,"totalMemoryConsumed":382,"gainInMilliseconds":128,"percentageGain":2.5584649210473716,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
7 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4136,"arraySortTotalTime":4844,"totalMemoryConsumed":382,"gainInMilliseconds":708,"percentageGain":17.11798839458414,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
8 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4022,"arraySortTotalTime":4833,"totalMemoryConsumed":382,"gainInMilliseconds":811,"percentageGain":20.164097463948284,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
9 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4108,"arraySortTotalTime":4728,"totalMemoryConsumed":382,"gainInMilliseconds":620,"percentageGain":15.092502434274586,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
10 | {"arraySize":100000000,"sortType":2,"parallelQuickSortTotalTime":4241,"arraySortTotalTime":4865,"totalMemoryConsumed":382,"gainInMilliseconds":624,"percentageGain":14.71351096439519,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
11 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Som-Data/Result 200000000 Type 1.json:
--------------------------------------------------------------------------------
1 | {"arraySize":200000000,"sortType":1,"parallelQuickSortTotalTime":8711,"arraySortTotalTime":22135,"totalMemoryConsumed":764,"gainInMilliseconds":13424,"percentageGain":154.10400642865343,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
2 | {"arraySize":200000000,"sortType":1,"parallelQuickSortTotalTime":8614,"arraySortTotalTime":22829,"totalMemoryConsumed":763,"gainInMilliseconds":14215,"percentageGain":165.0220571163223,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
3 | {"arraySize":200000000,"sortType":1,"parallelQuickSortTotalTime":8496,"arraySortTotalTime":21961,"totalMemoryConsumed":763,"gainInMilliseconds":13465,"percentageGain":158.48634651600753,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
4 | {"arraySize":200000000,"sortType":1,"parallelQuickSortTotalTime":8675,"arraySortTotalTime":21873,"totalMemoryConsumed":763,"gainInMilliseconds":13198,"percentageGain":152.13832853025937,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
5 | {"arraySize":200000000,"sortType":1,"parallelQuickSortTotalTime":8365,"arraySortTotalTime":22095,"totalMemoryConsumed":763,"gainInMilliseconds":13730,"percentageGain":164.13628212791394,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
6 |
--------------------------------------------------------------------------------
/Parallel Quick Sort Results/Som-Data/Result 350000000 Type 1.json:
--------------------------------------------------------------------------------
1 | {"arraySize":350000000,"sortType":1,"parallelQuickSortTotalTime":9217,"arraySortTotalTime":40680,"totalMemoryConsumed":1342,"gainInMilliseconds":31463,"percentageGain":341.3583595529999,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
2 | {"arraySize":350000000,"sortType":1,"parallelQuickSortTotalTime":9190,"arraySortTotalTime":41799,"totalMemoryConsumed":2670,"gainInMilliseconds":32609,"percentageGain":354.83133841131666,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
3 | {"arraySize":350000000,"sortType":1,"parallelQuickSortTotalTime":9034,"arraySortTotalTime":43710,"totalMemoryConsumed":1336,"gainInMilliseconds":34676,"percentageGain":383.8388310825769,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
4 | {"arraySize":350000000,"sortType":1,"parallelQuickSortTotalTime":8956,"arraySortTotalTime":40941,"totalMemoryConsumed":1336,"gainInMilliseconds":31985,"percentageGain":357.1348816435909,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
5 | {"arraySize":350000000,"sortType":1,"parallelQuickSortTotalTime":8568,"arraySortTotalTime":40838,"totalMemoryConsumed":1336,"gainInMilliseconds":32270,"percentageGain":376.63398692810455,"ranOutOfMemoryDueToParallelQuickSort":false,"ranOutOfMemoryDueToArraysParallelSort":false}
6 |
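In the Som-Data records the derived fields are internally consistent: gainInMilliseconds equals arraySortTotalTime minus parallelQuickSortTotalTime, and percentageGain is that gain expressed relative to the parallel quick sort time (for the first record above, 40680 - 9217 = 31463 and 100 * 31463 / 9217 = 341.358...). A minimal sketch that re-derives and checks both fields (not part of the repo; the path is an assumption):

import json

with open("Result 350000000 Type 1.json") as f:
    for line in f:
        if not line.strip():
            continue
        r = json.loads(line)
        gain = r["arraySortTotalTime"] - r["parallelQuickSortTotalTime"]
        pct = 100.0 * gain / r["parallelQuickSortTotalTime"]
        assert gain == r["gainInMilliseconds"]
        assert abs(pct - r["percentageGain"]) < 1e-6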
--------------------------------------------------------------------------------
/PartialOverlappedInference/edit_distance.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import numba
3 | from typing import List, Tuple
4 |
5 | W_INS = 2
6 | W_INS_NON = 0
7 | W_DEL = 2
8 | W_DEL_NON = 0
9 | W_SUB = 1
10 | W_MATCH = -2
11 |
12 |
13 | def alloc(Di, Di_1):
14 | C = np.zeros([len(Di) + 1, len(Di_1) + 1], dtype=np.int32)
15 | Di = np.asarray(Di)
16 | Di_1 = np.asarray(Di_1)
17 | return C, Di, Di_1
18 |
19 |
20 | @numba.jit(nopython=True)
21 | def row_fill(C):
22 | for j in range(0, C.shape[0]): # 0 to N_i + 1
23 | C[j, 0] = W_DEL_NON * j
24 |
25 |
26 | @numba.jit(nopython=True)
27 | def col_fill(C):
28 | for k in range(1, C.shape[1]): # 1 to N_{i+1} + 1
29 | C[0, k] = W_INS * k
30 |
31 |
32 | @numba.jit(nopython=True)
33 | def e_sub(Di, Di_1, j, k):
34 | # j/k indices are padded by 1, so subtract 1 when indexing seq Di and Di_1
35 | if Di[j - 1] == Di_1[k - 1]:
36 | return W_MATCH
37 | else:
38 | return W_SUB
39 |
40 |
41 | @numba.jit(nopython=True)
42 | def cost(C, Di, Di_1):
43 | # first pass
44 | for j in range(1, C.shape[0]): # 1 to N_i + 1
45 | for k in range(1, C.shape[1]): # 1 to N_{i+1} + 1
46 | if j < (C.shape[0] - 1):
47 | del_cost = C[j - 1, k] + W_DEL
48 | ins_cost = C[j, k - 1] + W_INS
49 | sub_cost = C[j - 1, k - 1] + e_sub(Di, Di_1, j, k)
50 |
51 | C[j, k] = min(del_cost, ins_cost, sub_cost)
52 | else:
53 | del_cost = C[j - 1, k] + W_DEL
54 | ins_cost = C[j, k - 1] + W_INS_NON
55 | sub_cost = C[j - 1, k - 1] + e_sub(Di, Di_1, j, k)
56 |
57 | C[j, k] = min(del_cost, ins_cost, sub_cost)
58 |
59 |
60 | def compute_alignment(Di: str, Di_1: str):
61 | Di = list(Di)
62 | Di_1 = list(Di_1)
63 |
64 | # allocate memory
65 | C, Di, Di_1 = alloc(Di, Di_1)
66 |
67 | # initialize
68 | row_fill(C)
69 | col_fill(C)
70 |
71 | # compute cost
72 | cost(C, Di, Di_1)
73 | return C
74 |
75 |
76 | @numba.jit(nopython=True)
77 | def compute_overlap_path(C) -> Tuple[List[Tuple[int, int]], Tuple[int, int]]:
78 | j, k = C.shape[0] - 1, C.shape[1] - 1
79 | idx = None
80 | path = [(j, k)]
81 |
82 | while j > 0 or k > 0:
83 | if j > 0 and k > 0:
84 | top = C[j - 1, k]
85 | left = C[j, k - 1]
86 | diagonal = C[j - 1, k - 1]
87 |
88 | if diagonal <= top and diagonal <= left:
89 | # overlapped segment, update both idx and path
90 | idx = (j - 1, k - 1)
91 | path.append((j - 1, k - 1))
92 | j = j - 1
93 | k = k - 1
94 |
95 | elif top <= left and top <= diagonal:
96 | # don't update the overlap index, just the path
97 | path.append((j - 1, k))
98 | j = j - 1
99 |
100 | elif left <= top and left <= diagonal:
101 | # not an overlap, but prioritize Di_1, so update idx
102 | idx = (j, k - 1)
103 | path.append((j, k - 1))
104 | k = k - 1
105 |
106 | else:
107 | print("[INVALID STATE DURING ALIGNMENT BACKTRACK; EXITING]")
108 | break
109 |
110 | elif j > 0:
111 | # don't update the overlap index, just the path
112 | path.append((j - 1, k))
113 | j = j - 1
114 |
115 | else:
116 | # not an overlap; prioritize Di_1, so update the path but not idx
117 | path.append((j, k - 1))
118 | k = k - 1
119 |
120 | return path, idx
121 |
122 |
123 | def merge_text(Di, Di_1, overlap_idx):
124 | new_seq = Di[:overlap_idx[0]] + Di_1[overlap_idx[1]:]
125 | return new_seq
126 |
127 |
128 | def print_alignment(C, Di, Di_1):
129 | for k in range(len(Di_1) + 1):
130 | if k == 0:
131 | print(" \t-\t", end='')
132 | else:
133 | print(f"{Di_1[k - 1]}\t", end="")
134 | print()
135 |
136 | for j in range(C.shape[0]): # 0 to N_i + 1
137 | if j == 0:
138 | print("-\t", end='')
139 | else:
140 | print(f"{Di[j - 1]}\t", end="")
141 |
142 | for k in range(C.shape[1]): # 0 to N_{i+1} + 1
143 | print(f"{C[j][k]}\t", end="")
144 | print()
145 | print()
146 |
147 |
148 | if __name__ == '__main__':
149 | Di = "speech recognize"
150 | Di_1 = "cognition"
151 | print("Previous Buffer (Di) :", Di)
152 | print("New Buffer (Di+1) :", Di_1)
153 |
154 | C = compute_alignment(Di, Di_1)
155 |
156 | print()
157 | print("Alignment Matrix :")
158 | print_alignment(C, Di, Di_1)
159 |
160 | path, overlap_idx = compute_overlap_path(C)
161 | path = [str(p) for p in path]
162 |
163 | print("Overlap path : ", " -> ".join(path))
164 | print("Overlap index :", overlap_idx)
165 |
166 | new_sentence = "".join(merge_text(Di, Di_1, overlap_idx))
167 | print()
168 | print("Previous Buffer (Di) :", Di)
169 | print("New Buffer (Di+1) :", Di_1)
170 | print("Merged sequence :", new_sentence)
171 |
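For reference, the recurrence that cost() fills in, written out from the code: the boundary values come from row_fill and col_fill, and on the last row the insertion weight switches to W_INS_NON so the tail of D_{i+1} can extend past D_i for free:

$$C[j,0] = 0, \qquad C[0,k] = k \cdot w_{\mathrm{ins}},$$
$$C[j,k] = \min\bigl(C[j-1,k] + w_{\mathrm{del}},\; C[j,k-1] + w_{\mathrm{ins}},\; C[j-1,k-1] + e_{\mathrm{sub}}(j,k)\bigr),$$

where $e_{\mathrm{sub}}(j,k) = w_{\mathrm{match}}$ if $D_i[j-1] = D_{i+1}[k-1]$ and $w_{\mathrm{sub}}$ otherwise, and $w_{\mathrm{ins}}$ is replaced by $w_{\mathrm{ins\_non}} = 0$ on the last row.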
--------------------------------------------------------------------------------
/RamanujanMachines/euler.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import numba
4 | import numpy as np
5 |
6 |
7 | @numba.jit(nopython=True, cache=True)
8 | def e(n: int):
9 | x = 3.0
10 |
11 | if n <= 0:
12 | return x
13 |
14 | result = 0.0
15 | for i in range(n, 0, -1):
16 | num = -i
17 | denom = (i + 3) + result
18 | result = (num / denom)
19 |
20 | x = x + result
21 | return x
22 |
23 |
24 | def abs_diff(n: int):
25 | for ni in range(1, n + 1):
26 | val = e(ni)
27 | print("Value of Euler's constant after %d iterations" % (ni), val)
28 |
29 | diff = np.abs((np.e - val))
30 | print("Absolute difference : ", diff)
31 | print()
32 |
33 |
34 | if __name__ == '__main__':
35 | # iters = 15
36 | # val = e(iters)
37 | # print("Value of Euler's constant after %d iterations" % (iters), val)
38 | #
39 | # diff = np.abs((np.e - val))
40 | # print("Absolute difference : ", diff)
41 |
42 | abs_diff(15)
43 |
44 | # time
45 | num_tests = 10000
46 | t1 = time.time()
47 | for i in range(num_tests):
48 | e(20)
49 | t2 = time.time()
50 |
51 | print("Time for %d runs = " % (num_tests), ((t2 - t1)))
52 | print("Time per run = ", ((t2 - t1) / float(num_tests)))
--------------------------------------------------------------------------------
/RamanujanMachines/pi.py:
--------------------------------------------------------------------------------
1 | import time
2 | import numba
3 | import numpy as np
4 |
5 |
6 | @numba.jit(nopython=True, cache=True)
7 | def pi(n: int):
8 | x = 3.0
9 |
10 | if n <= 0:
11 | return x
12 |
13 | result = 0.0
14 | denom_outer = 5 + (n - 1) * 2
15 |
16 | for i in range(n, 0, -1):
17 | num = i * (i + 2)
18 | denom = denom_outer + result
19 | result = (num / denom)
20 | denom_outer -= 2
21 |
22 | x = x + result # = 4 / (pi - 2)
23 | x = (4. / x) + 2.
24 | return x
25 |
26 |
27 | def abs_diff(n: int):
28 | for ni in range(1, n + 1):
29 | val = pi(ni)
30 | print("Value of Pi after %d iterations" % (ni), val)
31 |
32 | diff = np.abs((np.pi - val))
33 | print("Absolute difference : ", diff)
34 | print()
35 |
36 |
37 | if __name__ == '__main__':
38 | # iters = 15
39 | # val = pi(iters)
40 | # print("Value of Euler's constant after %d iterations" % (iters), val)
41 | # diff = np.abs((np.pi - val))
42 | # print("Absolute difference : ", diff)
43 |
44 | abs_diff(20)
45 |
46 | # time
47 | num_tests = 10000
48 | t1 = time.time()
49 | for i in range(num_tests):
50 | pi(20)
51 | t2 = time.time()
52 |
53 | print("Time for %d runs = " % (num_tests), ((t2 - t1)))
54 | print("Time per run = ", ((t2 - t1) / float(num_tests)))
--------------------------------------------------------------------------------
/SuperFormula/superformula.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def R(rho, a, b, m, n1, n2, n3):
4 | r = np.abs(np.abs(np.cos(m * rho / 4)) / a) ** n2 + np.abs(np.abs(np.sin(m * rho / 4)) / b) ** n3
5 | r = np.abs(r) ** (-1 / n1)
6 | return r
7 |
8 | def generalized_R(rho, a, b, y, z, n1, n2, n3):
9 | r = np.abs(np.cos(y * rho / 4) / a) ** n2 + np.abs(np.sin(z * rho / 4) / b) ** n3
10 | r = np.abs(r) ** (-1 / n1)
11 | return r
12 |
13 | def xy(rho, a, b, m, n1, n2, n3):
14 | x = R(rho, a, b, m, n1, n2, n3) * np.cos(rho)
15 | y = R(rho, a, b, m, n1, n2, n3) * np.sin(rho)
16 | return x, y
17 |
18 | def xy_general(rho, a, b, y, z, n1, n2, n3):
19 | x = generalized_R(rho, a, b, y, z, n1, n2, n3) * np.cos(rho)
20 | y = generalized_R(rho, a, b, y, z, n1, n2, n3) * np.sin(rho)
21 | return x, y
22 |
23 | def xyz(R1, theta, R2, phi):
24 | x = R1 * np.cos(theta) * R2 * np.cos(phi)
25 | y = R1 * np.sin(theta) * R2 * np.cos(phi)
26 | z = R2 * np.sin(phi)
27 | return x, y, z
28 |
29 | def xyz2(theta, a, b, m, n1, n2, n3, rho, a2, b2, m2, n4, n5, n6):
30 | x = R(theta, a, b, m, n1, n2, n3) * np.cos(theta) * R(rho, a2, b2, m2, n4, n5, n6) * np.cos(rho)
31 | y = R(theta, a, b, m, n1, n2, n3) * np.sin(theta) * R(rho, a2, b2, m2, n4, n5, n6) * np.cos(rho)
32 | z = R(rho, a2, b2, m2, n4, n5, n6) * np.sin(rho)
33 | return x, y, z
34 |
35 | if __name__ == "__main__":
36 | u = np.arange(0, 2 * np.pi, 0.001)
37 |
38 | # Ordinary formula
39 | vals = [xy(ui, 1, 1, 6, 1, 7, 8) for ui in u]
40 | x, y = [], []
41 |
42 | for v in vals:
43 | x.append(v[0])
44 | y.append(v[1])
45 |
46 | import seaborn as sns
47 | import matplotlib.pyplot as plt
48 | sns.set_style("white")
49 |
50 | plt.plot(x, y)
51 | plt.show()
52 |
53 | # Generalized formula
54 | vals = [xy_general(ui, 1, 1, 8, 40, -0.2, 1, 1) for ui in u]
55 | x.clear()
56 | y.clear()
57 |
58 | for v in vals:
59 | x.append(v[0])
60 | y.append(v[1])
61 |
62 | plt.plot(x, y)
63 | plt.show()
64 |
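65 | # Reference: R implements Gielis' superformula in polar form,
66 | # r(phi) = (|cos(m*phi/4)/a|^n2 + |sin(m*phi/4)/b|^n3)^(-1/n1),
67 | # with the curve traced via x = r*cos(phi), y = r*sin(phi).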
--------------------------------------------------------------------------------
/SuperFormula/superformula_theano.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import theano.tensor as T
3 | from theano import *
4 |
5 | def R(rho, a, b, m, n1, n2, n3):
6 | rho_ = T.vector('rho')
7 | a_ = T.scalar('a')
8 | b_ = T.scalar('b')
9 | m_ = T.scalar('m')
10 | n1_ = T.scalar('n1')
11 | n2_ = T.scalar('n2')
12 | n3_ = T.scalar('n3')
13 |
14 | r = np.abs(np.abs(T.cos(m_ * rho_ / 4)) / a_) ** n2_ + np.abs(np.abs(T.sin(m_ * rho_ / 4)) / b_) ** n3_
15 | r = np.abs(r) ** (-1 / n1_)
16 | func = function([rho_, a_, b_, m_, n1_, n2_, n3_], [r], allow_input_downcast=True)
17 | return func(rho, a, b, m, n1, n2, n3)
18 |
19 | def generalized_R(rho, a, b, y, z, n1, n2, n3):
20 | rho_ = T.vector('rho')  # rho is passed in as an array, matching R above
21 | a_ = T.scalar('a')
22 | b_ = T.scalar('b')
23 | y_ = T.scalar('y')
24 | z_ = T.scalar('z')
25 | n1_ = T.scalar('n1')
26 | n2_ = T.scalar('n2')
27 | n3_ = T.scalar('n3')
28 |
29 | r = np.abs(T.cos(y_ * rho_ / 4) / a_) ** n2_ + np.abs(T.sin(z_ * rho_ / 4) / b_) ** n3_
30 | r = np.abs(r) ** (-1 / n1_)
31 | func = function([rho_, a_, b_, y_, z_, n1_, n2_, n3_], [r], allow_input_downcast=True)
32 | return func(rho, a, b, y, z, n1, n2, n3)
33 |
34 | def xy(rho, a, b, m, n1, n2, n3):
35 | rho_ = T.vector('rho')
36 | r = R(rho, a, b, m, n1, n2, n3)
37 |
38 | x = r * T.cos(rho_)
39 | y = r * T.sin(rho_)
40 | func = function([rho_], [x, y], allow_input_downcast=True)
41 | vals = func(rho)
42 | return vals[0].flatten(), vals[1].flatten()
43 |
44 | def xy_general(rho, a, b, y, z, n1, n2, n3):
45 | rho_ = T.vector('rho')
46 | r = generalized_R(rho, a, b, y, z, n1, n2, n3)
47 |
48 | x = r * T.cos(rho_)
49 | y = r * T.sin(rho_)
50 | func = function([rho_], [x, y], allow_input_downcast=True)
51 | vals = func(rho)
52 | return vals[0].flatten(), vals[1].flatten()
53 |
54 | def xyz(R1, theta, R2, phi):
55 | theta_ = T.scalar('theta')
56 | phi_ = T.scalar('phi')
57 |
58 | x = R1 * T.cos(theta_) * R2 * T.cos(phi_)
59 | y = R1 * T.sin(theta_) * R2 * T.cos(phi_)
60 | z = R2 * T.sin(phi_)
61 | func = function([theta_, phi_], [x, y, z], allow_input_downcast=True)
62 | vals = func(theta, phi)
63 | return vals[0].flatten(), vals[1].flatten(), vals[2].flatten()
64 |
65 | def xyz2(theta, a, b, m, n1, n2, n3, rho, a2, b2, m2, n4, n5, n6):
66 | theta_ = T.scalar('theta')
67 | rho_ = T.scalar('rho')
68 | R1 = R(theta, a, b, m, n1, n2, n3)
69 | R2 = R(rho, a2, b2, m2, n4, n5, n6)
70 |
71 | x = R1 * T.cos(theta_) * R2 * T.cos(rho_)
72 | y = R1 * T.sin(theta_) * R2 * T.cos(rho_)
73 | z = R2 * T.sin(rho_)
74 | func = function([theta_, rho_], [x, y, z], allow_input_downcast=True)
75 | vals = func(theta, rho)
76 | return vals[0].flatten(), vals[1].flatten(), vals[2].flatten()
77 |
78 | if __name__ == "__main__":
79 | u = np.arange(0, 2 * np.pi, 0.001)
80 |
81 | # Ordinary formula
82 | vals = xy(u, 1, 1, 6, 1, 7, 8)
83 | x = vals[0]
84 | y = vals[1]
85 |
86 | import seaborn as sns
87 | import matplotlib.pyplot as plt
88 | sns.set_style("white")
89 | plt.plot(x, y)
90 | plt.show()
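91 |
92 | # Design note: R and generalized_R each compile and run their own Theano function,
93 | # so xy/xy_general receive r as a plain numpy constant baked into a second graph.
94 | # A fully symbolic variant would build r, x and y inside a single compiled function.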
--------------------------------------------------------------------------------
/TensorflowLearn/approximate_solution.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | # tf.set_random_seed(0)
4 |
5 | #x = tf.Variable(tf.random_uniform((1,)), dtype=tf.float32)
6 | x = tf.placeholder(tf.float32, shape=[None])
7 | y = tf.placeholder(tf.float32, shape=[None])
8 |
9 | a = tf.Variable(1, dtype=tf.float32)
10 | b = tf.Variable(1, dtype=tf.float32)
11 |
12 | y_pred = a * x + b  # include the bias; the Fahrenheit conversion has an intercept of 32
13 |
14 | #loss = y # to minimize the function itself
15 | loss = tf.nn.l2_loss(y - y_pred) # to minimize a function with variables
16 | # loss += -(1 - x * x)
17 | # loss += -(1 - a * a)
18 | # loss += -(1 - b * b)
19 |
20 | global_step = tf.Variable(0, trainable=False)
21 | lr = tf.train.exponential_decay(0.1, global_step, decay_steps=500, decay_rate=0.95, staircase=True)
22 |
23 | opt = tf.train.RMSPropOptimizer(lr)
24 | train_op = opt.minimize(loss) # minimize
25 | #train_op = opt.minimize(-loss) # maximize
26 |
27 | sess = tf.Session()
28 | sess.run(tf.global_variables_initializer())
29 |
30 | X = []
31 | Y = []
32 |
33 | for j in range(-25, 35):
34 | X.append(j)
35 | Y.append(j * (9. / 5.) + 32.)
36 |
37 | print(X[-1], Y[-1], 1.85 * j + 1.85 * 18)
38 |
39 | print()
40 |
41 | X = np.array(X, dtype='float32')
42 | Y = np.array(Y, dtype='float32')
43 |
44 | for i in range(10000):
45 | _, loss_val, val_a, val_b = sess.run([train_op, loss, a, b],
46 | feed_dict={
47 | x: X,
48 | y: Y,
49 | })
50 |
51 | if i % 50 == 0:
52 | print(i, "loss : ", loss_val, "A : ", val_a, "B : ", val_b)
--------------------------------------------------------------------------------
/TensorflowLearn/eager_constrained_optimization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tensorflow.contrib.eager.python import tfe
4 |
5 | tf.enable_eager_execution()
6 | tf.set_random_seed(0)
7 |
8 | device = '/gpu:0' if tfe.num_gpus() > 0 else '/cpu:0'
9 |
10 | # variables
11 | x = tf.get_variable('x', dtype=tf.float32, initializer=1.0)
12 | y = tf.get_variable('y', dtype=tf.float32, initializer=1.0)
13 |
14 | # function to optimize
15 | def f(x, y):
16 | return x + y
17 |
18 | # Constraint:
19 | # the solution must satisfy x^2 + y^2 = 1
20 |
21 | lambd = tf.get_variable('lambda', dtype=tf.float32, initializer=1.0,
22 | constraint=lambda x: tf.clip_by_value(x, 0., np.infty))
23 |
24 | def constraint(x, y):
25 | return (x * x + y * y - 1)
26 |
27 |
28 | def L(x, y, l):
29 | return -f(x, y) + l * constraint(x, y)
30 |
31 | optimizer = tf.train.GradientDescentOptimizer(0.05)
32 |
33 | for i in range(1000):
34 | #loss_val, grad_vars = gradients(x, y, lambd_x)
35 | #optimizer.apply_gradients(grad_vars, tf.train.get_or_create_global_step())
36 |
37 | optimizer.minimize(lambda: L(x, y, lambd), tf.train.get_or_create_global_step())
38 |
39 | #lambd_x = tf.clip_by_value(lambd_x, 0., np.inf)
40 | print("L", lambd.numpy())
41 |
42 | if i % 1 == 0:
43 | print("X", x.numpy(), "Y", y.numpy(), "norm", (x ** 2 + y ** 2).numpy())
44 |
45 | loss_val = L(x, y, lambd)
46 | print("Iteration %d : Loss %0.4f, function value : %0.4f" % (i + 1, loss_val.numpy(), f(x, y).numpy()))
47 | print()
48 |
49 |
50 | print("X", x.numpy(), "Y", y.numpy(), "norm", (x ** 2 + y ** 2).numpy())
--------------------------------------------------------------------------------
/TensorflowLearn/eager_lstm.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tensorflow.contrib.eager.python import tfe
4 | tf.enable_eager_execution()
5 |
6 | class LSTMModel(tf.keras.Model):
7 |
8 | def __init__(self, units=20, **kwargs):
9 | super().__init__(**kwargs)
10 | self.units = units
11 | self.kernel = tf.keras.layers.Dense(4 * units, use_bias=False)
12 | self.recurrent_kernel = tf.keras.layers.Dense(4 * units, kernel_initializer='orthogonal')
13 |
14 | def call(self, inputs, training=None, mask=None):
15 | outputs = []
16 | states = []
17 | h_state = tf.zeros((inputs.shape[0], self.units))
18 | c_state = tf.zeros((inputs.shape[0], self.units))
19 |
20 | for t in range(inputs.shape[1]):
21 | ip = inputs[:, t, :]
22 | z = self.kernel(ip)
23 | z += self.recurrent_kernel(h_state)
24 |
25 | z0 = z[:, :self.units]
26 | z1 = z[:, self.units: 2 * self.units]
27 | z2 = z[:, 2 * self.units: 3 * self.units]
28 | z3 = z[:, 3 * self.units:]
29 |
30 | # gate updates
31 | i = tf.nn.sigmoid(z0)
32 | f = tf.nn.sigmoid(z1)
33 | o = tf.nn.sigmoid(z3)
34 |
35 | # state updates
36 | c = f * c_state + i * tf.nn.tanh(z2)
37 | h = o * tf.nn.tanh(c)
38 |
39 | h_state = h
40 | c_state = c
41 |
42 | outputs.append(h)
43 | states.append([h, c])
44 |
45 | self.states = states
46 |
47 | return tf.stack(outputs, axis=1)
48 |
49 | units = 20
50 | model = LSTMModel(units)
51 |
52 | X = np.linspace(0.0, 1.0, num=100).reshape(-1, 1, 1) + np.random.normal(0, 0.05, size=(100, 10, 1))  # reshape so broadcasting yields (100, 10, 1), matching Y
53 | Y = np.linspace(-0.1, 1.1, num=100).reshape(-1, 1, 1) + np.random.normal(0, 0.045, size=(100, 10, units))
54 |
55 | optimizer = tf.train.AdamOptimizer(1e-3)
56 | model.compile(optimizer=optimizer, loss='mse')  # 'diff' is not a built-in Keras loss; mean squared error fits this regression
57 |
58 | model.fit(X, Y, batch_size=20, epochs=200, validation_split=0.1)
59 |
60 |
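61 | # The call() loop implements the standard LSTM recurrence, with the fused
62 | # projection [z0; z1; z2; z3] = W x_t + U h_{t-1}:
63 | #   i_t = sigmoid(z0), f_t = sigmoid(z1), o_t = sigmoid(z3)
64 | #   c_t = f_t * c_{t-1} + i_t * tanh(z2)
65 | #   h_t = o_t * tanh(c_t)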
--------------------------------------------------------------------------------
/TensorflowLearn/eager_simple_optimization.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.contrib.eager.python import tfe
3 | tf.enable_eager_execution()
4 |
5 | x = tfe.Variable(initial_value=tf.random_uniform([1], -1., 1.), name='x')
6 |
7 | def loss(input):
8 | return tf.sigmoid(input)
9 |
10 | grad_vars = tfe.implicit_gradients(loss)
11 | opt = tf.train.GradientDescentOptimizer(learning_rate=1)
12 |
13 | for i in range(1000):
14 | for j in range(50):
15 | opt.apply_gradients(grad_vars(x))
16 |
17 | if i % 50 == 0:
18 | loss_val = loss(x)
19 | print(i, "Optimal Value : ", loss_val.numpy(), "Val (X) : ", x.numpy())
--------------------------------------------------------------------------------
/TensorflowLearn/function_optimization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | # tf.set_random_seed(0)
4 |
5 | x = tf.Variable(tf.random_uniform((1,)), dtype=tf.float32)
6 | a = tf.Variable(tf.random_uniform((1,)), dtype=tf.float32)
7 | b = tf.Variable(tf.random_uniform((1,)), dtype=tf.float32)
8 |
9 | y = tf.exp(a * x) + tf.exp(b * x)
10 |
11 | #loss = y # to minimize the function itself
12 | loss = tf.nn.l2_loss(y) # to minimize a function with variables
13 | # loss += -(1 - x * x)
14 | # loss += -(1 - a * a)
15 | # loss += -(1 - b * b)
16 |
17 | global_step = tf.Variable(0, trainable=False)
18 | lr = tf.train.exponential_decay(0.1, global_step, decay_steps=500, decay_rate=0.95, staircase=True)
19 |
20 | opt = tf.train.RMSPropOptimizer(lr)
21 | train_op = opt.minimize(loss) # minimize
22 | #train_op = opt.minimize(-loss) # maximize
23 |
24 | sess = tf.Session()
25 | sess.run(tf.global_variables_initializer())
26 |
27 | for i in range(10000):
28 | for j in range(50):
29 | _, y_val, val_x, val_a, val_b = sess.run([train_op, y, x, a, b])
30 |
31 | if i % 50 == 0:
32 | print(i, "Y : ", loss, "X : ", val_x, "A : ", val_a, "B : ", val_b)
--------------------------------------------------------------------------------
/TensorflowLearn/logistic_reg.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | np.random.seed(1)
4 | from sklearn.model_selection import train_test_split
5 | from scipy.signal import sawtooth
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 |
10 | nb_samples = 640
11 | nb_timesteps = 512
12 | nb_epochs = 1000
13 | reg_lambda = 0.5
14 |
15 |
16 | def sin_wave():
17 | nb = np.random.randint(1, 100, size=1)[0]
18 | shift = np.random.randint(0, 91, size=1)[0]
19 |
20 | x = np.arange(-nb * np.pi, nb * np.pi, step=(2 * nb * np.pi / nb_timesteps))
21 | y = np.sin(x + (shift * np.pi / 180.))  # shift is in degrees; convert to radians
22 |
23 | noise = np.random.uniform(-0.1, 0.1, size=len(x))
24 | y += noise
25 |
26 | return y
27 |
28 |
29 | def triangle_wave():
30 | nb = np.random.randint(1, 100, size=1)[0]
31 | shift = np.random.randint(0, 91, size=1)[0]
32 |
33 | x = np.arange(-nb * np.pi, nb * np.pi, step=(2 * nb * np.pi / nb_timesteps))
34 | y = sawtooth(x + (shift * np.pi / 180.), width=0.5)  # shift is in degrees; convert to radians
35 |
36 | noise = np.random.uniform(-0.1, 0.1, size=len(x))
37 | y += noise
38 |
39 | return y
40 |
41 |
42 | # x, y = sin_wave()
43 | # x, y = triangle_wave()
44 |
45 | X = np.zeros((nb_samples, nb_timesteps), dtype='float32')
46 | y = np.zeros((nb_samples, 2), dtype='float32')
47 |
48 | for i in range(nb_samples // 2):
49 | X[i] = sin_wave()
50 | y[i, 0] = 1.
51 |
52 | for i in range(nb_samples // 2):
53 | X[i + (nb_samples // 2)] = triangle_wave()
54 | y[i + (nb_samples // 2), 1] = 1.
55 |
56 | # for i in range(3):
57 | # idx = np.random.randint(0, 512, size=1)[0]
58 | # plt.plot(X[idx])
59 | # plt.show()
60 |
61 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, shuffle=True)
62 |
63 | X = tf.placeholder(tf.float32, shape=(None, nb_timesteps), name='input')
64 | Y = tf.placeholder(tf.float32, shape=(None, 2), name='label')
65 |
66 | W = tf.Variable(np.random.normal(0.0, 1.0, size=(nb_timesteps, 2)), name='weights', dtype=tf.float32)
67 | b = tf.Variable(np.zeros((2,), dtype=np.float32))
68 |
69 | y_pred = tf.matmul(X, W) + b
70 |
71 | accuracy = tf.reduce_mean(tf.cast(tf.equal(
72 | tf.argmax(Y, axis=-1),
73 | tf.argmax(tf.nn.softmax(y_pred), axis=-1)), tf.float32))
74 |
75 | loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=y_pred, name='loss'))
76 | loss += reg_lambda * tf.nn.l2_loss(W)
77 |
78 | optimizer = tf.train.GradientDescentOptimizer(0.01)
79 | train_op = optimizer.minimize(loss)
80 |
81 | with tf.Session() as sess:
82 | sess.run(tf.global_variables_initializer())
83 |
84 | train_accs = []
85 | test_accs = []
86 |
87 | for epoch in range(nb_epochs):
88 | feed_dict = {
89 | X: X_train,
90 | Y: y_train,
91 | }
92 |
93 | _, acc, l = sess.run([train_op, accuracy, loss],
94 | feed_dict=feed_dict)
95 |
96 | test_acc = sess.run(accuracy,
97 | feed_dict={
98 | X: X_test,
99 | Y: y_test,
100 | })
101 |
102 | train_accs.append(acc)
103 | test_accs.append(test_acc)
104 |
105 | if epoch % 50 == 0:
106 | print("%d: Train accuracy : " % (epoch), acc, "Loss : ", l)
107 | print("%d: Test accuracy : " % (epoch), test_acc)
108 | print()
109 |
110 | acc = sess.run(accuracy,
111 | feed_dict={
112 | X: X_test,
113 | Y: y_test,
114 | })
115 |
116 | print()
117 | print("%d: Final Accuracy: " % nb_epochs, acc, "Best test score : ", max(test_accs))
118 |
119 | plt.plot(train_accs, label='train')
120 | plt.plot(test_accs, label='test')
121 | plt.legend()
122 | plt.show()
123 |
--------------------------------------------------------------------------------
/TensorflowLearn/mendelbrot_eager.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.contrib.eager.python import tfe
3 | import numpy as np
4 |
5 | from PIL import Image
6 | from io import BytesIO
7 | import matplotlib.pyplot as plt
8 |
9 | tf.enable_eager_execution()
10 |
11 |
12 | def DisplayFractal(a):
13 | """Display an array of iteration counts as a
14 | colorful picture of a fractal."""
15 | a_cyclic = (6.28 * a / 20.0).reshape(list(a.shape) + [1])
16 | img = np.concatenate([10 + 20 * np.cos(a_cyclic),
17 | 30 + 50 * np.sin(a_cyclic),
18 | 155 - 80 * np.cos(a_cyclic)], 2)
19 | img[a == a.max()] = 0
20 | a = img
21 | a = np.uint8(np.clip(a, 0, 255))
22 |
23 | print(a.shape)
24 | plt.figure(dpi=300, figsize=(20, 20))
25 | plt.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
26 | plt.imshow(a)
27 | #plt.show()
28 | plt.savefig('temp.png')
29 |
30 |
31 | Y, X = np.mgrid[-1.3:1.3:0.001, -2:1:0.001]
32 | Z = X + 1j * Y
33 |
34 | num_gpus = tfe.num_gpus()
35 |
36 | if num_gpus > 0:
37 | with tf.device('gpu:0'):
38 | xs = tf.constant(Z.astype(np.complex64))
39 | zs = tfe.Variable(xs)
40 | ns = tfe.Variable(tf.zeros_like(xs, tf.float32))
41 | else:
42 | with tf.device('/cpu:0'):
43 | xs = tf.constant(Z.astype(np.complex64))
44 | zs = tfe.Variable(xs)
45 | ns = tfe.Variable(tf.zeros_like(xs, tf.float32))
46 |
47 | # Operation to update the zs and the iteration count.
48 | #
49 | # Note: We keep computing zs after they diverge! This
50 | # is very wasteful! There are better, if a little
51 | # less simple, ways to do this.
52 |
53 | def compute(zs, ns):
54 | for i in range(1000):
55 | # Compute the new values of z: z^2 + x
56 | zs_ = zs * zs + xs
57 |
58 | # Have we diverged with this new value?
59 | not_diverged = tf.abs(zs_) < 4
60 |
61 | zs = zs_
62 | ns = ns + tf.cast(not_diverged, tf.float32)
63 |
64 | return zs, ns
65 |
66 | if num_gpus > 0:
67 | with tf.device('gpu:0'):
68 | zs, ns = compute(zs, ns)
69 | else:
70 | with tf.device('/cpu:0'):
71 | zs, ns = compute(zs, ns)
72 |
73 | DisplayFractal(ns.numpy())
--------------------------------------------------------------------------------
/TensorflowLearn/numpy_sgd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | np.random.seed(0)
3 |
4 | x = np.random.uniform(-1.0, 1.0)
5 |
6 | def f(x):
7 | return np.exp(-np.logaddexp(0, -x)) + np.tanh(x)
8 |
9 | def df_dx(x):
10 | z1 = np.exp(-np.logaddexp(0, -x))
11 | z2 = np.tanh(x)
12 | return z1 * (1 - z1) + (1 - z2 ** 2)
13 |
14 | learning_rate = 1
15 | beta1 = 0.9
16 | beta2 = 0.999
17 | epsilon = 1e-8
18 | iter = 1
19 |
20 | M = 0.
21 | R = 0.
22 | R_hat = 0.
23 |
24 | for i in range(10000):
25 | for j in range(50):
26 | dx = df_dx(x)
27 |
28 | M = beta1 * M + (1 - beta1) * dx
29 | R = beta2 * R + (1 - beta2) * dx ** 2
30 |
31 | R_hat = np.maximum(R_hat, R)
32 |
33 | lr = learning_rate / (np.sqrt(R_hat + epsilon))
34 |
35 | x -= lr * (M)
36 |
37 | R = R_hat
38 | iter += 1
39 | #x -= learning_rate * df_dx(x)
40 |
41 | if i % 50 == 0:
42 | print(i, "Optimal Value : ", f(x), "Val (X) : ", x)
--------------------------------------------------------------------------------
/TensorflowLearn/optimization_profit.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | tf.enable_eager_execution()
3 |
4 | # Available Flour = 30
5 | # Available Eggs = 40
6 |
7 | # Pasta recipe => 1 Pasta = 2.5 * Flour + 5 * Eggs
8 | # Bread recipe => 1 Bread = 3.5 * Flour + 2.5 * Eggs
9 |
10 | # Pasta Sale Price = 3
11 | # Bread Sale Price = 2.5
12 |
13 | # Constraints
14 | # 2.5 Pasta + 3.5 Bread <= 30 # Flour
15 | # 5.0 Pasta + 2.5 Bread <= 40 # Eggs
16 | # Bread >= 0
17 | # Pasta >= 0
18 |
19 | # Maximize : 3 * Pasta + 2.5 * Bread
20 |
21 | # Use non neg constraint to force last 2 constrains
22 | pasta_t = tf.get_variable('pasta', initializer=0., constraint=tf.keras.constraints.non_neg())
23 | bread_t = tf.get_variable('breads', initializer=0., constraint=tf.keras.constraints.non_neg())
24 |
25 |
26 | # Flour cost (per pasta and per bread)
27 | def flour():
28 | res = 2.5 * pasta_t + 3.5 * bread_t
29 | return res
30 |
31 | # Eggs cost (per pasta and per bread)
32 | def eggs():
33 | res = 5.0 * pasta_t + 2.5 * bread_t
34 | return res
35 |
36 | # Profit per pasta and bread
37 | def profit():
38 | return 3.0 * pasta_t + 2.5 * bread_t
39 |
40 | # Additional constraints on available flour and eggs
41 | # Can substitute square instead of abs for smoother fit
42 | def constraint():
43 | return tf.square(29.5 - flour()) + \
44 | tf.square(39.5 - eggs())
45 |
46 | # Objective function - to be minimized (minimize constraints, maximize profits)
47 | def objective():
48 | return constraint() - profit()
49 |
50 |
51 | optimizer = tf.train.GradientDescentOptimizer(0.01)
52 |
53 | for i in range(200):
54 | with tf.GradientTape() as tape:
55 | loss = objective()
56 | gradients = tape.gradient(loss, [pasta_t, bread_t])
57 |
58 | grad_vars = zip(gradients, [pasta_t, bread_t])
59 | optimizer.apply_gradients(grad_vars, global_step=tf.train.get_or_create_global_step())
60 |
61 | print("Objective : ", objective().numpy())
62 | print("Profit : ", profit().numpy())
63 | print()
64 |
65 | p = pasta_t.numpy()
66 | b = bread_t.numpy()
67 | print("Pasta : ", p, "Bread : ", b)
68 | print("Flour : ", flour().numpy(), "Eggs : ", eggs().numpy())
69 |
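70 | # Sanity check: with the exact limits (flour = 30, eggs = 40) this is a linear
71 | # program whose optimum sits at the intersection of both constraints:
72 | #   pasta ~= 5.78, bread ~= 4.44, profit ~= 28.44
73 | # The softened 29.5/39.5 penalty above should settle near this vertex.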
--------------------------------------------------------------------------------
/TensorflowLearn/plot_distributions.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | k = tf.placeholder(tf.float32)
4 |
5 | # Make a normal distribution, with a shifting mean
6 | mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1)
7 | # Record that distribution into a histogram summary
8 | tf.summary.histogram("normal/moving_mean", mean_moving_normal)
9 |
10 | # Make a normal distribution with shrinking variance
11 | variance_shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k))
12 | # Record that distribution too
13 | tf.summary.histogram("normal/shrinking_variance", variance_shrinking_normal)
14 |
15 | # Let's combine both of those distributions into one dataset
16 | normal_combined = tf.concat([mean_moving_normal, variance_shrinking_normal], 0)
17 | # We add another histogram summary to record the combined distribution
18 | tf.summary.histogram("normal/bimodal", normal_combined)
19 |
20 | # Add a gamma distribution
21 | gamma = tf.random_gamma(shape=[1000], alpha=k)
22 | tf.summary.histogram("gamma", gamma)
23 |
24 | # And a poisson distribution
25 | poisson = tf.random_poisson(shape=[1000], lam=k)
26 | tf.summary.histogram("poisson", poisson)
27 |
28 | # And a uniform distribution
29 | uniform = tf.random_uniform(shape=[1000], maxval=k*10)
30 | tf.summary.histogram("uniform", uniform)
31 |
32 | # Finally, combine everything together!
33 | all_distributions = [mean_moving_normal, variance_shrinking_normal,
34 | gamma, poisson, uniform]
35 | all_combined = tf.concat(all_distributions, 0)
36 | tf.summary.histogram("all_combined", all_combined)
37 |
38 | summaries = tf.summary.merge_all()
39 |
40 | # Setup a session and summary writer
41 | sess = tf.Session()
42 | writer = tf.summary.FileWriter("tmp")
43 |
44 | # Setup a loop and write the summaries to disk
45 | N = 400
46 | for step in range(N):
47 | k_val = step/float(N)
48 | summ = sess.run(summaries, feed_dict={k: k_val})
49 | writer.add_summary(summ, global_step=step)
50 |
51 | writer.close()  # flush pending summaries to disk
--------------------------------------------------------------------------------
/TensorflowLearn/simple_optimization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tensorflow.contrib.opt.python.training.sign_decay import get_cosine_decay_fn
4 | from tensorflow.contrib.opt.python.training.powersign import PowerSignOptimizer
5 |
6 | x = tf.Variable(initial_value=tf.random_uniform((1,)), dtype=tf.float32)
7 | #y = tf.nn.sigmoid(x) * (1 - tf.nn.sigmoid(x))
8 | #y = (1 - tf.nn.tanh(x) ** 2)
9 | #y = tf.tan(x) ** 2 - x - tf.log(tf.abs(x)) / (tf.log(10.) * (x ** 2 - 1))
10 | y = tf.nn.sigmoid(x) + tf.nn.tanh(x)
11 |
12 | #opt = tf.train.GradientDescentOptimizer(learning_rate=1)
13 | #train_op = opt.minimize(y) # minimize
14 | #train_op = opt.minimize(-y) # maximize
15 |
16 | global_step = tf.Variable(0, trainable=False, name='global_step')
17 | decay_steps = 1000
18 | cosine_decay = get_cosine_decay_fn(decay_steps)
19 | opt = PowerSignOptimizer(learning_rate=1, sign_decay_fn=cosine_decay)
20 |
21 | train_op = opt.minimize(y, global_step) # minimize
22 | #train_op = opt.minimize(-y, global_step) # maximize
23 |
24 | sess = tf.Session()
25 | sess.run(tf.global_variables_initializer())
26 |
27 | for i in range(1000):
28 | for j in range(50):
29 | _, loss, val = sess.run([train_op, y, x])
30 |
31 | if i % 50 == 0:
32 | print(i, "Optimal Value : ", loss, "Val (X) : ", val)
--------------------------------------------------------------------------------
/Theano-learn/derivatives.py:
--------------------------------------------------------------------------------
1 | import theano.tensor as T
2 | from theano import *
3 | import numpy as np
4 |
5 | # Derivative of x ** 2
6 |
7 | x = T.dscalar('x')
8 | y = x ** 2
9 |
10 | # Derivative of x ** 2 = 2 * x
11 | gradY = T.grad(y, x)
12 | print("Symbolic derivative : ", pp(gradY))
13 |
14 | f = function([x], gradY)
15 | print("Derivative function : ", pp(f.maker.fgraph.outputs[0])) # Should return 2 * x
16 |
17 | print(f(4))
18 |
19 | # Second order derivatives
20 |
21 | d2y = T.grad(gradY, x)
22 | print("Symbolic derivative : ", pp(d2y))
23 |
24 | f = function([x], d2y)
25 | print("Derivative function : ", pp(f.maker.fgraph.outputs[0])) # Should return 2
26 |
27 | print(f(4)) # Should return 2
28 |
29 | # Derivative of logistic function
30 |
31 | x = T.dmatrix('x')
32 | s = T.sum(1 / (1 + T.exp(-x)))
33 |
34 | dS = T.grad(s, x)
35 |
36 | dlogistic = function([x], dS)
37 | mat1 = [[0, 1], [-1, -2]]
38 |
39 | print(dlogistic(mat1))
40 |
41 |
42 |
43 |
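44 | # Note: for the logistic function s(x), ds/dx = s(x) * (1 - s(x)), so the matrix
45 | # printed above has 0.25 at x = 0 and smaller values as |x| grows.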
--------------------------------------------------------------------------------
/Theano-learn/examples.py:
--------------------------------------------------------------------------------
1 | from theano import *
2 | import theano.tensor as T
3 | import numpy as np
4 |
5 | # Logistic function
6 | x = T.matrix('x', 'float32')
7 | op = 1 / (1 + T.exp(-x))
8 |
9 | logistic = function([x], op)
10 |
11 | mat1 = [[0, 1], [-1, -2]]
12 | print(logistic(mat1))
13 |
14 | # Multiple outputs
15 | a, b = T.fmatrices('a', 'b')
16 | diff = a - b
17 | absDiff = abs(diff)
18 | sqrDiff = diff ** 2
19 |
20 | f = function([a, b], [diff, absDiff, sqrDiff])
21 |
22 | mat2 = [[10, 5], [5, 10]]
23 | mat3 = [[5, 10], [10, 5]]
24 |
25 | print(f(mat2, mat3))
26 |
27 | # Default values
28 |
29 | x, y = T.fscalars('x', 'y')
30 | z = x + y
31 |
32 | f = function([x, In(y, value=0)], z)
33 |
34 | print(f(20))
35 | print(f(20, 10))
36 |
37 | # Shared variables
38 |
39 | state = shared(0)
40 | inc = T.iscalar('inc')
41 |
42 | accumulator = function([inc], state, updates=[(state, state + inc)])
43 |
44 | print("state : ", state.get_value())
45 | accumulator(1)
46 | print("state : ", state.get_value())
47 | accumulator(300)
48 | print("state : ", state.get_value())
49 |
50 | print("resetting state")
51 | state.set_value(0)
52 | print("state : ", state.get_value())
53 |
54 | # Copying functions
55 |
56 | newState = shared(0)
57 | acc2 = accumulator.copy(swap={state:newState})
58 | acc2(1000)
59 |
60 | print('original state : ', state.get_value())
61 | print("new state : ", newState.get_value())
62 |
63 | # Using Random Numbers
64 |
65 | from theano.tensor.shared_randomstreams import RandomStreams
66 | srng = RandomStreams(seed=1)
67 |
68 | rv_u = srng.uniform((2, 2))
69 | rv_v = srng.normal((2, 2))
70 |
71 | f = function([], rv_u)
72 | g = function([], rv_v, no_default_updates=True)
73 | nearly_zeros = function([], rv_u + rv_u - 2 * rv_u)
74 |
75 | print(f(), ' ', f())
76 | print(g(), ' ', g())
77 | print(nearly_zeros(), ' ', nearly_zeros())
--------------------------------------------------------------------------------
/Theano-learn/initial.py:
--------------------------------------------------------------------------------
1 | from theano import *
2 | import theano.tensor as T
3 | import numpy as np
4 |
5 | x = T.dscalar('x')
6 | y = T.dscalar('y')
7 | z = x + y
8 |
9 | f = function([x, y], z)
10 |
11 | print(f(2, 3))
12 |
13 | x = T.dmatrix('x')
14 | y = T.dmatrix('y')
15 | z = x + y
16 |
17 | f = function([x, y], z)
18 |
19 | mat1 = [[10, 5], [5, 10]]
20 | mat2 = [[5, 10], [10, 5]]
21 |
22 | print(f(mat1, mat2))
23 |
24 | a = T.vector('a', 'float32')
25 | b = a + a ** 10
26 |
27 | f = function([a], b)
28 |
29 | print(f([0, 1, 2]))
--------------------------------------------------------------------------------
/Theano-learn/linear_regression.py:
--------------------------------------------------------------------------------
1 | import theano.tensor as T
2 | from theano import *
3 | import random
4 |
5 | x = T.vector('x')
6 | y = T.vector('y')
7 | y_pred = T.vector('y_pred')
8 |
9 | meanX = T.mean(x)
10 | meanY = T.mean(y)
11 |
12 | beta = T.sum((x - meanX) * (y - meanY)) / T.sum((x - meanX) ** 2)
13 | alpha = meanY - beta * meanX
14 | _predict = beta * x + alpha
15 |
16 | _mse = T.sum((y - y_pred) ** 2)
17 |
18 | compute_vals = function([x, y], [alpha, beta], allow_input_downcast=True)
19 | predict = function([x, alpha, beta], [_predict], allow_input_downcast=True)
20 | mse = function([y, y_pred], [_mse], allow_input_downcast=True)
21 |
22 | if __name__ == "__main__":
23 | import numpy as np
24 | random.seed(1)
25 |
26 | X = [i * 0.1 for i in range(1, 101)]
27 | y = [i + random.gauss(0, 0.33) for i in X]
28 |
29 | alpha, beta = compute_vals(X, y)
30 | print("lr : ", alpha, " - beta : ", beta)
31 |
32 | preds = predict(X, alpha, beta)[0]
33 | error = mse(y, preds)[0]
34 |
35 | print("Mean Squared Error : ", error)
36 |
37 | import seaborn as sns
38 | import matplotlib.pyplot as plt
39 | sns.set_style('white')
40 | plt.scatter(X, y)
41 | plt.plot(X, preds)
42 | plt.show()
43 |
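44 | # Note: compute_vals is the closed-form simple-linear-regression (OLS) solution:
45 | #   beta = sum((x - mean(x)) * (y - mean(y))) / sum((x - mean(x))^2)
46 | #   alpha = mean(y) - beta * mean(x)
47 | # For this synthetic data (y ~= x + noise), expect beta ~= 1 and alpha ~= 0.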
--------------------------------------------------------------------------------
/Theano-learn/logistic_regression.py:
--------------------------------------------------------------------------------
1 | import theano.tensor as T
2 | from theano import *
3 | import numpy as np
4 |
5 | rng = np.random
6 | import sklearn.metrics as metrics
7 |
8 | N = 400
9 | features = 784
10 |
11 | # generate a dataset: D = (input_values, target_class)
12 | D = (rng.randn(N, features), rng.randint(size=N, low=0, high=2))
13 | training_steps = 10000
14 |
15 | x = T.fmatrix('x')
16 | y = T.fvector('y')
17 |
18 | # initialize the weight vector w randomly
19 | #
20 | # this and the following bias variable b
21 | # are shared so they keep their values
22 | # between training iterations (updates)
23 | w = shared(rng.random(features) - 0.5, name="w")
24 |
25 | # initialize the bias term
26 | b = shared(0., name="b")
27 |
28 | print("Initial model:")
29 | print(w.get_value())
30 | print(b.get_value())
31 |
32 | p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b)) # Probability that target = 1
33 | prediction = p_1 > 0.5 # The prediction thresholded
34 | xent = -y * T.log(p_1) - (1-y) * T.log(1-p_1) # Cross-entropy loss function
35 | cost = xent.mean() + 0.01 * (w ** 2).sum()  # The loss to minimize
36 | gw, gb = T.grad(cost, [w, b]) # Compute the gradient of the loss
37 | # w.r.t weight vector w and
38 | # bias term b
39 | # (we shall return to this in a
40 | # following section of this tutorial)
41 |
42 | # Compile
43 | train = function(
44 | inputs=[x,y],
45 | outputs=[prediction, xent],
46 | updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)), allow_input_downcast=True)
47 | predict = function(inputs=[x], outputs=prediction, allow_input_downcast=True)
48 |
49 | # Train
50 | for i in range(training_steps):
51 | pred, err = train(D[0], D[1])
52 | if i % 100 == 0: print((i / 100.), '% done.')
53 |
54 | print("Final model:")
55 | print(w.get_value())
56 | print(b.get_value())
57 | print("target values for D:")
58 | print(D[1])
59 | print("prediction on D:")
60 | preds = predict(D[0])
61 | print(preds)
62 |
63 | print("Accuracy :", metrics.accuracy_score(D[1], preds))
--------------------------------------------------------------------------------
/convert_flac_to_mp3/convert.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | from subprocess import call
4 |
5 | flac_files = glob.glob("*.flac")
6 |
7 | for fp in flac_files:
8 | new_name = os.path.splitext(fp)[0] + ".mp3"
9 |
10 | call(["ffmpeg", "-y", "-i", fp, "-q:a", "0",new_name])
11 | print("Converted file : ", new_name)
12 |
13 | print("Finished converting all files !")
--------------------------------------------------------------------------------
/convert_wav_to_mp3/convert_to_mp3.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | from pathlib import Path
3 |
4 | wav_dir = Path('wav/')
5 | wav_files = wav_dir.glob('*.wav')
6 |
7 | mp3_dir = Path('mp3/')
8 |
9 | if not mp3_dir.exists():
10 | mp3_dir.mkdir(parents=True, exist_ok=True)
11 |
12 | for wf in wav_files:
13 | name = wf.stem + '.mp3'
14 |
15 | mf = mp3_dir / name
16 |
17 | subprocess.run(['ffmpeg', '-i', str(wf), '-b:a', '320k', str(mf)])
--------------------------------------------------------------------------------
/graph/cell.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from copy import deepcopy
3 | from collections import defaultdict
4 | from typing import List
5 | from uuid import uuid4
6 |
7 | from graph.core import Variable
8 |
9 |
10 | _cell_global_counter = 1
11 |
12 |
13 | class Cell(object):
14 |
15 | def __init__(self, name, inputs, ops, combine_op, id=None):
16 | global _cell_global_counter
17 |
18 | if name is None:
19 | name = '#C%d' % (_cell_global_counter)
20 | _cell_global_counter += 1
21 |
22 | self.name = name
23 | self.inputs = list(inputs) # type: List[Variable]
24 | self.ops = ops # type: List[str]
25 | self.combine_op = combine_op # type: str
26 | self.id = id if id is not None else uuid4()
27 |
28 | self.cell = self._build_cell()
29 |
30 | def function(self, activation):
31 | var_name = self._get_name(self)
32 |
33 | if hasattr(self, 'cell') and self.cell is not None:
34 | self.cell.name = '%s(%s)' % (activation, var_name)
35 | else:
36 | self.name = '%s(%s)' % (activation, var_name)
37 |
38 | return self
39 |
40 | def concat(self, other):
41 | # type: (Cell) -> Cell
42 | return Cell('([%s ; %s])' % (self.name, self._get_name(other)),
43 | inputs=[self, other],
44 | ops=None,
45 | combine_op='concat')
46 |
47 | def dot(self, other):
48 | # type: (Cell) -> Cell
49 | return Cell('(%s . %s)' % (self.name, self._get_name(other)),
50 | inputs=[self, other],
51 | ops=None,
52 | combine_op='dot')
53 |
54 | def __add__(self, other):
55 | # type: (Cell) -> Cell
56 | return Cell('(%s + %s)' % (self.name, self._get_name(other)),
57 | inputs=[self, other],
58 | ops=None,
59 | combine_op='add')
60 |
61 | def __sub__(self, other):
62 | # type: (Cell) -> Cell
63 | return Cell('(%s - %s)' % (self.name, self._get_name(other)),
64 | inputs=[self, other],
65 | ops=None,
66 | combine_op='sub')
67 |
68 | def __mul__(self, other):
69 | # type: (Cell) -> Cell
70 | return Cell('(%s * %s)' % (self.name, self._get_name(other)),
71 | inputs=[self, other],
72 | ops=None, # [self.ops, self._get_ops(other)]
73 | combine_op='mul')
74 |
75 | def _build_cell(self):
76 | if len(self.inputs) != 2:
77 | return None
78 |
79 | if self.ops is None:
80 | self.ops = ['noop', 'noop']
81 |
82 | elif len(self.ops) != 2:
83 | raise ValueError('2 Operations must be provided. '
84 | 'If no operation is required on a cell input, use `noop`.')
85 |
86 | x = self.inputs[0]
87 | y = self.inputs[1]
88 |
89 | # perform respective op on inputs
90 | o1 = Variable(self._check_special_ops(self.ops[0], x),
91 | data=0,
92 | parents=[x],
93 | op=self.ops[0])
94 |
95 | o2 = Variable(self._check_special_ops(self.ops[1], y),
96 | data=0,
97 | parents=[y],
98 | op=self.ops[1])
99 |
100 | # dispatch table over the supported combine ops ('matmul' is an alias of 'dot');
101 | # 'sub' is produced by `__sub__` and previously fell through to the error branch
102 | combine_fns = {'concat': self._var_concat,
103 |                'dot': self._var_dot,
104 |                'matmul': self._var_dot,
105 |                'add': self._var_add,
106 |                'sub': self._var_sub,
107 |                'mul': self._var_mul}
108 |
109 | if self.combine_op not in combine_fns:
110 |     raise ValueError('Cell `combine_op` must be one of %s' % sorted(combine_fns.keys()))
111 | return combine_fns[self.combine_op](o1, o2)
112 |
113 | def _get_name(self, var):
114 | if hasattr(var, 'cell') and var.cell is not None: # is a Cell
115 | var_name = str(var)
116 | else:
117 | var_name = var.name # is a Variable
118 | return var_name
119 |
120 | def _get_ops(self, x):
121 | if hasattr(x, 'ops'):
122 | return x.ops
123 |
124 | return None
125 |
126 | def _check_special_ops(self, op, var):
127 | var_name = self._get_name(var)
128 |
129 | if op == 'noop' or op == 'no-op':
130 | return var_name
131 |
132 | return '%s(%s)' % (op, var_name)
133 |
134 | # helpers that wrap the combined pair of inputs into a new Cell
135 | def _var_concat(self, x, y):
136 |     return Cell('([%s ; %s])' % (x.name, y.name),
137 |                 inputs=[self],
138 |                 ops=self.ops,
139 |                 combine_op='concat')
140 |
141 | def _var_dot(self, x, y):
142 |     return Cell('(%s . %s)' % (x.name, y.name),
143 |                 inputs=[self], ops=self.ops, combine_op='dot')
144 |
145 | def _var_add(self, x, y):
146 |     return Cell('(%s + %s)' % (x.name, y.name),
147 |                 inputs=[self], ops=self.ops, combine_op='add')
148 |
149 | def _var_sub(self, x, y):
150 |     # counterpart for `__sub__`; previously missing
151 |     return Cell('(%s - %s)' % (x.name, y.name),
152 |                 inputs=[self], ops=self.ops, combine_op='sub')
153 |
154 | def _var_mul(self, x, y):
155 |     return Cell('(%s * %s)' % (x.name, y.name),
156 |                 inputs=[self], ops=self.ops, combine_op='mul')
157 |
158 | def __repr__(self):
159 | if self.cell is not None:
160 | return '{%s}' % (str(self.cell.name))
161 | else:
162 | return self.name
--------------------------------------------------------------------------------
/graph/core.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from collections import defaultdict
3 | from uuid import uuid4
4 |
5 |
6 | _var_global_counter = 1
7 |
8 |
9 | class Variable(object):
10 |
11 | def __init__(self, name, data, parents=None, op=None, id=None):
12 | global _var_global_counter
13 |
14 | if name is None:
15 | name = '#%d' % (_var_global_counter)
16 | _var_global_counter += 1
17 |
18 | self.name = name
19 | self.data = data
20 | self.parents = parents
21 | self.op_name = op
22 | self.id = id if id is not None else uuid4()
23 |
24 | def resolve_expression(self, topdown=True):
25 | if self.parents is not None:
26 | if topdown:
27 | print("Operation : ", self)
28 |
29 | for i, parent in enumerate(self.parents):
30 | if parent is not None:
31 | parent.resolve_expression(topdown)
32 |
33 | if not topdown:
34 | print(parent)
35 |
36 | if not topdown:
37 | print("Operation : ", self)
38 | print('-' * 10)
39 | print()
40 | else:
41 | if topdown:
42 | print(self)
43 |
44 | def assign(self, other, name=None):
45 | if name is None:
46 | name = '%s\'' % (self.name)
47 |
48 | v = Variable(name,
49 | data=other.data,
50 | parents=[other],
51 | op='assign')
52 |
53 | v._assignment = '(%s = %s)' % (self.name, other.name)
54 |
55 | return v
56 |
57 | def __add__(self, other):
58 | return Variable('(%s + %s)' % (self.name, other.name),
59 | data=self.data + other.data,
60 | parents=[self, other],
61 | op='add')
62 |
63 | def __sub__(self, other):
64 | return Variable('(%s - %s)' % (self.name, other.name),
65 | data=self.data - other.data,
66 | parents=[self, other],
67 | op='sub')
68 |
69 | def __mul__(self, other):
70 | return Variable('(%s * %s)' % (self.name, other.name),
71 | data=self.data * other.data,
72 | parents=[self, other],
73 | op='mul')
74 |
75 | def __truediv__(self, other):
76 | return Variable('(%s / %s)' % (self.name, other.name),
77 | data=self.data / other.data,
78 | parents=[self, other],
79 | op='div')
80 |
81 | def __floordiv__(self, other):
82 | return Variable('(%s // %s)' % (self.name, other.name),
83 | data=self.data // other.data,
84 | parents=[self, other],
85 | op='floor_div')
86 |
87 | def __neg__(self):
88 | return Variable('(-%s)' % (self.name),
89 | data=-self.data,
90 | parents=[self],
91 | op='neg')
92 |
93 | def __pow__(self, power, modulo=None):
94 | return Variable('(%s ** %s)' % (self.name, power.name),
95 | data=self.data ** power.data,
96 | parents=[self, power],
97 | op='pow')
98 |
99 | def __repr__(self):
100 | if hasattr(self, '_assignment'):
101 | return '%s [%s] [value = %s]' % (self.name, self._assignment, str(self.data))
102 |
103 | return '%s [value = %s]' % (self.name, str(self.data))
--------------------------------------------------------------------------------
/graph/run_cell.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from copy import deepcopy
3 | from graph.core import Variable
4 | from graph.cell import Cell
5 |
6 |
7 | """ Small Cell """
8 | # x = Variable('x', 0)
9 | # y = Variable('y', 0)
10 | # z = Variable('z', 0)
11 | #
12 | # c1 = Cell('C1', inputs=[x, y], ops=['sigmoid', 'tanh'], combine_op='add')
13 | # c2 = Cell('C2', inputs=[z, c1], ops=['relu', 'no-op'], combine_op='concat')
14 | # c3 = Cell('C3', inputs=[c1, c2], ops=['relu', 'tanh'], combine_op='add')
15 | #
16 | # print(c3)
17 |
18 |
19 | """ LSTM Gates """
20 |
21 | # Inputs
22 | x = Variable('x', 0)
23 | ht_1 = Variable('ht-1', 0)
24 | ct_1 = Variable('ct-1', 0)
25 |
26 | # Weights
27 | W = Variable('w . x', 0)
28 | U = Variable('u . ht-1', 0)
29 |
30 | # Gates
31 | i = Cell('i', inputs=[W, U], ops=None, combine_op='add').function('sigmoid')
32 | f = Cell('f', inputs=[W, U], ops=None, combine_op='add').function('sigmoid')
33 | c = Cell('c', inputs=[W, U], ops=None, combine_op='add').function('tanh')  # candidate state uses tanh in a standard LSTM
34 | o = Cell('o', inputs=[W, U], ops=None, combine_op='add').function('sigmoid')
35 |
36 | c1 = f * ct_1
37 | c2 = i * c
38 | c = Cell('c', inputs=[c1, c2], ops=None, combine_op='add').function('tanh')
39 | h = Cell('h', inputs=[o, c], ops=None, combine_op='mul')
40 |
41 | print(h)
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/graph/run_core.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | from copy import deepcopy
4 |
5 | from graph.core import Variable
6 |
7 | def factorial(n):
8 | if n <= 1:
9 | return Variable(None, n)
10 | else:
11 | return factorial(n - 1) * Variable(None, n)
12 |
13 | # n = 7
14 | # f = factorial(n)
15 | #
16 | # print(f)
17 | # print()
18 | #
19 | # f.resolve_expression(topdown=True)
20 |
21 | """ Fibonacci """
22 |
23 | def fibonacci(n):
24 | if n <= 1:
25 | return Variable(None, n)
26 |
27 | return fibonacci(n - 1) + fibonacci(n - 2)
28 |
29 | # n = 5
30 | # f = fibonacci(n)
31 | #
32 | # print(f)
33 | # print()
34 | #
35 | # f.resolve_expression(topdown=True)
36 |
37 | def fibonacci_gen(n):
38 | a = Variable('a', 0)
39 | b = Variable('b', 1)
40 |
41 | for _ in range(n):
42 | yield a
43 | a, b = a.assign(b, name='a'), b.assign(a + b, name='b')
44 |
45 | # n = 5
46 | # fs = list(fibonacci_gen(n))
47 | #
48 | # print(fs[-1])
49 | # print()
50 |
51 | """ PI Finding """
52 |
53 | """ Track 1 """
54 |
55 | one = Variable('1', 1.)
56 | two = Variable('2', 2.)
57 |
58 | # def pi(x, r):
59 | # square = x ** two
60 | # return (r - square) ** (one / two)
61 | #
62 | # n = 20000
63 | #
64 | # cx = Variable('cx', 2.0 / n)
65 | # area = Variable('area', 0.0)
66 | #
67 | # for i in range(n):
68 | # i_node = Variable(None, i)
69 | #
70 | # x = (-one) + cx * i_node
71 | # area += pi(x, one) * cx
72 | #
73 | # pi_calc = two * area
74 | #
75 | # print(pi_calc)
76 | # print()
77 | #
78 | # # pi_calc.resolve_expression()
79 |
80 | """ Golden Ratio Phi """
81 |
82 | def phi(n):
83 | if n <= 1:
84 | return Variable(None, 3)
85 |
86 | return one + one / phi(n - 1)
87 |
88 | # n = 5
89 | # p = phi(n)
90 | #
91 | # print(p)
92 | # print()
93 | #
94 | # p.resolve_expression(topdown=True)
95 |
96 |
97 | """ Assignment """
98 |
99 | # x = Variable('x', 1)
100 | # y = Variable('y', 2)
101 | #
102 | # z = x * y
103 | # z += x.assign(y)
104 | # z = y.assign(z, name='z')
105 | #
106 | # print(x)
107 | # print(y)
108 | # print(z)
109 | # print()
110 | #
111 | # z.resolve_expression(topdown=True)
112 |
113 |
114 | """ Running Average """
115 | # np.random.seed(0)
116 | #
117 | # def input_attenuation(n):
118 | # beta = Variable('beta', 0.9)
119 | # beta_inv = Variable('(1 - beta)', 1. - beta.data)
120 | #
121 | # w = Variable('w', 0.)
122 | # x = Variable('x', 1.)
123 | #
124 | # X = []
125 | # W = []
126 | #
127 | # for i in range(n):
128 | # v = beta * w + beta_inv * x
129 | # x = x.assign(x - v, name='x')
130 | # w = w.assign(v, name='w')
131 | #
132 | # print(i + 1, x)
133 | # print(i + 1, w)
134 | # print()
135 | #
136 | # X.append(x.data)
137 | # W.append(w.data)
138 | #
139 | # return X, W
140 | #
141 | # n = 100
142 | # X, W = input_attenuation(n)
143 | #
144 | # plt.plot(X, label='X')
145 | # plt.plot(W, label='Moving average W')
146 | # plt.legend()
147 | # plt.show()
148 |
149 |
150 | # w.resolve_expression(topdown=True)
151 |
--------------------------------------------------------------------------------
/medicine/medicine.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | begin_date = datetime.date(2024, 6, 1)
4 | end_date = datetime.date(2025, 6, 1)
5 | delta = end_date - begin_date
6 | day_count = delta.days
7 |
8 | print('Begin date : ', begin_date)
9 | print('End Date : ', end_date)
10 | print("Number of days : ", delta.days)
11 | print()
12 |
13 |
14 | def count_med_strips(name, per_day, per_strip, verbose=True):
15 | total_count = day_count * per_day
16 | number_of_strips = total_count // per_strip + 1  # whole strips, rounded up (buys one spare when the count divides evenly)
17 |
18 | if verbose:
19 | print('Medicine {:15s}: Count = {:7.2f} ({:0.2f} per day) | Number of strips = {:3.0f}'.format(
20 | name, total_count, per_day, number_of_strips
21 | ))
22 |
23 | return total_count, number_of_strips
24 |
25 |
26 | def count_med_strips_with_available_count(name, per_day, per_strip, available_count, verbose=True):
27 | total_count, number_of_strips = count_med_strips(name, per_day, per_strip, verbose=False)
28 | number_of_strips = (total_count - available_count) // per_strip + 1
29 |
30 | currently_available_strips = available_count // per_strip
31 |
32 | if verbose:
33 | print('Medicine {:15s}: Count = {:7.2f} ({:0.2f} per day) | '
34 | 'Currently Available Strips = {:4.2f} (count={:4.2f}) | '
35 | 'Number of strips to buy = {:3.0f} (count~={:4.0f})'.format(
36 | name, total_count, per_day,
37 | currently_available_strips, available_count,
38 | number_of_strips, total_count - available_count
39 | ))
40 |
41 | return total_count, number_of_strips
42 |
43 |
44 | """ Count basic medicines """
45 |
46 | # count_med_strips(name='Levipil 500', per_day=2, per_strip=10)
47 | # count_med_strips(name='Cardiace 5', per_day=3, per_strip=10)
48 | # # count_med_strips(name='Azoran', per_day=3, per_strip=10)
49 | # count_med_strips(name='HCQS 200', per_day=2, per_strip=15)
50 | # count_med_strips(name='Omnacortil 2.5', per_day=3, per_strip=10)
51 | # count_med_strips(name='Ecosporin 75', per_day=1, per_strip=10)
52 | # count_med_strips(name='Shellcal HD', per_day=1, per_strip=15)
53 | # count_med_strips(name='Osteofos 70', per_day=1. / 7, per_strip=4)
54 |
55 | """ Count mediciation with available accounted for """
56 | # count_med_strips_with_available_count(name='Levipil 500', per_day=2, per_strip=10, available_count=0)
57 | # count_med_strips_with_available_count(name='Cellcept', per_day=3, per_strip=10, available_count=2 * 428)
58 | count_med_strips_with_available_count(name='Ramipril/Cardace', per_day=3, per_strip=15, available_count=65 * 15)
59 | # count_med_strips_with_available_count(name='Azoran', per_day=3, per_strip=10, available_count=0)
60 | count_med_strips_with_available_count(name='HCQS 200', per_day=2, per_strip=15, available_count=600)
61 | count_med_strips_with_available_count(name='Omnacortil 2.5', per_day=0.5, per_strip=10, available_count=130)
62 | count_med_strips_with_available_count(name='Ecosporin 75', per_day=1, per_strip=14, available_count=10 * 14)
63 | # count_med_strips_with_available_count(name='Shellcal HD', per_day=1, per_strip=15, available_count=390)
64 | count_med_strips_with_available_count(name='Osteofos 70', per_day=1. / 7, per_strip=4, available_count=18 * 4)
65 | count_med_strips_with_available_count(name='Tayo 60K', per_day=1. / 30., per_strip=7, available_count=12)
66 |
67 |
--------------------------------------------------------------------------------
/medicine/merge_pdfs.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import argparse
4 |
5 | from PyPDF2 import PdfMerger
6 |
7 |
8 | parser = argparse.ArgumentParser('Python pdf file merger')
9 |
10 | parser.add_argument('-f', dest='filepaths', type=str, nargs='+', default=None, required=False, help='List of file paths')
11 | parser.add_argument('-d', dest='directory', type=str, default=None, required=False, help='Directory of file paths')
12 | parser.add_argument('-o', dest='output', type=str, default=None, required=False, help='Output directory (defaults to the current working directory)')
13 |
14 | args = parser.parse_args()
15 |
16 | filepaths = args.filepaths
17 | directory = args.directory
18 | output_dir = args.output
19 |
20 | if (filepaths is None or len(filepaths) == 0) and (directory is None):
21 | raise FileNotFoundError("No file provided!")
22 |
23 | if filepaths is None:
24 | if directory[-1] in ("'", '"'):
25 | directory = directory[:-1]
26 | filepaths = sorted(list(glob.glob(os.path.join(directory, "*.pdf"))))
27 |
28 | if output_dir is None:
29 | output_dir = os.getcwd()
30 |
31 | if not os.path.exists(output_dir):
32 | os.makedirs(output_dir, exist_ok=True)
33 |
34 | merger = PdfMerger()
35 |
36 | for pdf in filepaths:
37 | merger.append(pdf)
38 |
39 |
40 | output_dir = os.path.abspath(output_dir)
41 | output_filepath = os.path.join(output_dir, 'Som - Feb 2023 - Reports.pdf')
42 | merger.write(output_filepath)
43 | merger.close()
44 |
45 | print(f"Results written to path : {output_filepath}")
46 |
--------------------------------------------------------------------------------
/metaprog/composition.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | def custom_dir(c, add):
4 | return dir(type(c)) + list(c.__dict__.keys()) + add
5 |
6 |
7 | class BaseComposite:
8 | "Base class for attr accesses in `self._extra_params` passed down to `self.components`"
9 |
10 | @property
11 | def _extra_params(self):
12 | if not hasattr(self, 'components'):
13 | self.components = []
14 |
15 | if type(self.components) not in {list, tuple}:
16 | self.components = [self.components]
17 |
18 | elif type(self.components) == tuple:
19 | self.components = list(self.components)
20 |
21 | args = []
22 | for component in self.components:
23 | args.extend([o for o in dir(component)
24 | if not o.startswith('_')])
25 |
26 | return args
27 |
28 | def __getattr__(self, k):
29 | if k in self._extra_params:
30 | for component in self.components:
31 | if hasattr(component, k):
32 | return getattr(component, k)
33 |
34 | raise AttributeError(k)
35 |
36 | def __dir__(self):
37 | return custom_dir(self, self._extra_params)
38 |
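39 |
40 | if __name__ == '__main__':
41 |     # Usage sketch: `Engine` and `Car` are hypothetical classes for illustration.
42 |     class Engine:
43 |         def start(self):
44 |             return 'started'
45 |
46 |     class Car(BaseComposite):
47 |         def __init__(self):
48 |             self.components = [Engine()]
49 |
50 |     print(Car().start())  # attribute lookup is delegated to the Engine component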
--------------------------------------------------------------------------------
/metaprog/delegates.py:
--------------------------------------------------------------------------------
1 | import inspect
2 |
3 |
4 | def delegates(to=None, keep=False):
5 | "Decorator: replace `**kwargs` in signature with params from `to`"
6 |
7 | def _f(f):
8 | if to is None:
9 | to_f, from_f = f.__base__.__init__, f.__init__
10 | else:
11 | to_f, from_f = to, f
12 |
13 | sig = inspect.signature(from_f)
14 | sigd = dict(sig.parameters)
15 | k = sigd.pop('kwargs')
16 |
17 | s2 = {k: v for k, v in inspect.signature(to_f).parameters.items()
18 | if v.default != inspect.Parameter.empty and k not in sigd}
19 | sigd.update(s2)
20 |
21 | if keep:
22 | sigd['kwargs'] = k
23 |
24 | from_f.__signature__ = sig.replace(parameters=sigd.values())
25 | return f
26 |
27 | return _f
28 |
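29 |
30 | if __name__ == '__main__':
31 |     # Usage sketch: `base` and `extended` are hypothetical functions for illustration.
32 |     def base(x, scale=1.0, offset=0.0):
33 |         return x * scale + offset
34 |
35 |     @delegates(base)
36 |     def extended(x, **kwargs):
37 |         return base(x, **kwargs)
38 |
39 |     # the **kwargs in the signature is replaced by base's defaulted parameters
40 |     print(inspect.signature(extended))  # -> (x, scale=1.0, offset=0.0)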
--------------------------------------------------------------------------------
/metaprog/registration.py:
--------------------------------------------------------------------------------
1 | import abc
2 | import six
3 | from collections import OrderedDict
4 |
5 | REGISTERED_STRATEGIES = OrderedDict()
6 |
7 |
8 | class RegistrationError(ValueError):
9 |
10 | def __init__(self, cls_name, subclass_type):
11 | msg = "Class %s is not an abstract class, yet it's subclass type " \
12 | "was specified as %s. All registered subclasses must set the " \
13 | "class parameter `subclass_type` to be properly registered !" % (
14 | cls_name, subclass_type)
15 | ValueError.__init__(self, msg)
16 |
17 |
18 | class RegisteredStrategy(abc.ABCMeta):
19 |
20 | def __new__(mcls, class_name, bases, class_dict):
21 | name = class_name
22 | class_dict['name'] = name
23 |
24 | cls = super(RegisteredStrategy, mcls).__new__(mcls, class_name, bases, class_dict)
25 | subclass_type = cls.subclass_type
26 |
27 | if 'abstract' not in name.lower() and subclass_type == 'abstract':
28 | raise RegistrationError(name, subclass_type)
29 |
30 | # override the subclass type if it is an Abstract class.
31 | if 'abstract' in name.lower():
32 | subclass_type = 'abstract'
33 |
34 | # register the class object
35 | if subclass_type in REGISTERED_STRATEGIES:
36 | if name in REGISTERED_STRATEGIES[subclass_type]:
37 | raise RuntimeError("A Strategy has already been registered with the given "
38 | "class name in this subclass type. "
39 | "Set a different name for this class.")
40 |
41 | else:
42 | REGISTERED_STRATEGIES[subclass_type][name] = cls
43 |
44 | else:
45 | # create a new subclass type in the registry, and add the registered subclass itself
46 | REGISTERED_STRATEGIES[subclass_type] = OrderedDict()
47 | REGISTERED_STRATEGIES[subclass_type][name] = cls
48 |
49 | return cls
50 |
51 |
52 | @six.add_metaclass(RegisteredStrategy)
53 | class AbstractStrategy(object):
54 | # Subclass type designates which registry the class will belong to.
55 | subclass_type = 'abstract'
56 |
57 | def __init__(self,):
58 | super(AbstractStrategy, self).__init__()
59 |
60 |
61 | def get(strategy_name) -> AbstractStrategy:
62 | strategy_list = []
63 | for subclass_type in REGISTERED_STRATEGIES.keys():
64 |
65 | if strategy_name in REGISTERED_STRATEGIES[subclass_type]:
66 | return REGISTERED_STRATEGIES[subclass_type][strategy_name]
67 |
68 |         # strategy not found under this subclass type; collect its names for the error message
69 | strategy_list.extend(list(REGISTERED_STRATEGIES[subclass_type].keys()))
70 |
71 | raise RuntimeError("Strategy %s not found in registered registry ! "
72 | "Found registry = %s" % (strategy_name,
73 | str(strategy_list)))
74 |
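A usage sketch of the registry: a concrete subclass of `AbstractStrategy` must override `subclass_type` (a class without 'abstract' in its name that keeps the value 'abstract' raises `RegistrationError` at class-creation time), and `get` looks the class up by name across every registry, returning the registered class itself rather than an instance.

    class GreedyStrategy(AbstractStrategy):
        subclass_type = 'search'             # registered under the 'search' registry

    strategy_cls = get('GreedyStrategy')     # class lookup by name
    strategy = strategy_cls()                # instantiate it yourself
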
--------------------------------------------------------------------------------
/numpygrad/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/titu1994/Python-Work/621d1476d40bc935f28877b5f170e21dcd8da371/numpygrad/__init__.py
--------------------------------------------------------------------------------
/numpygrad/activations.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpygrad.layers import Layer
3 |
4 |
5 | class _Activation(Layer):
6 |
7 | def __init__(self):
8 | super(_Activation, self).__init__()
9 |
10 | def forward(self, input, **kwargs):
11 | raise NotImplementedError()
12 |
13 |
14 | class Sigmoid(_Activation):
15 |
16 | def forward(self, input, **kwargs):
17 | return input.sigmoid()
18 |
19 |
20 | class Tanh(_Activation):
21 |
22 | def forward(self, input, **kwargs):
23 | return input.tanh()
24 |
25 |
26 | class ReLU(_Activation):
27 |
28 | def forward(self, input, **kwargs):
29 | return input.relu()
30 |
--------------------------------------------------------------------------------
/numpygrad/layers.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpygrad.tensor import Tensor
3 |
4 | class Layer(object):
5 |
6 | def __init__(self):
7 | self._parameters = []
8 |
9 | def forward(self, input, *args, **kwargs):
10 | raise NotImplementedError()
11 |
12 | def __call__(self, input, *args, **kwargs):
13 | if not isinstance(input, Tensor):
14 | input = Tensor(input, autograd=True)
15 |
16 | return self.forward(input, *args, **kwargs)
17 |
18 | @property
19 | def parameters(self):
20 | return self._parameters
21 |
22 | def __setattr__(self, key, value):
23 | super(Layer, self).__setattr__(key, value)
24 |
25 | if isinstance(value, Tensor) and value.autograd:
26 | self._parameters.append(value)
27 |
28 | elif isinstance(value, Layer):
29 | self._parameters.extend(value.parameters)
30 |
31 |
32 | class Sequential(Layer):
33 |
34 | def __init__(self, layers=None):
35 | super(Sequential, self).__init__()
36 |
37 | if layers is None:
38 | layers = []
39 |
40 | self.layers = layers
41 |
42 | for l in layers:
43 | self._parameters.extend(l.parameters)
44 |
45 | def add(self, layer):
46 | self.layers.append(layer)
47 | self._parameters.extend(layer.parameters)
48 |
49 | def forward(self, input, **kwargs):
50 | x = input
51 |
52 | for layer in self.layers:
53 | x = layer(x)
54 |
55 | return x
56 |
57 |
58 | class Dense(Layer):
59 |
60 | def __init__(self, n_in, n_out):
61 | super(Dense, self).__init__()
62 |
63 | W = np.random.uniform(size=(n_in, n_out)) * np.sqrt(2. / n_in)
64 | b = np.zeros(n_out)
65 |
66 | self.w = Tensor(W, autograd=True)
67 | self.b = Tensor(b, autograd=True)
68 |
69 | def forward(self, input, **kwargs):
70 | out = input.dot(self.w) + self.b.expand(axis=0, repeats=len(input.data))
71 | return out
72 |
73 |
74 | class Embedding(Layer):
75 |
76 | def __init__(self, vocab_size, dim):
77 | super(Embedding, self).__init__()
78 |
79 | self.vocab_size = vocab_size
80 | self.dim = dim
81 |
82 |         # this random initialization style is just a convention from word2vec
83 |         w = (np.random.rand(vocab_size, dim) - 0.5) / dim
84 | self.weight = Tensor(w, autograd=True)
85 |
86 | def forward(self, input, *args, **kwargs):
87 | return self.weight.index_select(input)
88 |
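A small forward-pass sketch, assuming the autograd `Tensor` from numpygrad/tensor.py (linked above) supports the `dot`/`+`/`expand` ops that `Dense` relies on:

    import numpy as np

    model = Sequential([Dense(2, 8), Dense(8, 1)])
    x = np.random.uniform(size=(4, 2))       # batch of 4 samples with 2 features
    out = model(x)                           # Layer.__call__ wraps x in a Tensor
    print(out.data.shape)                    # -> (4, 1)
    print(len(model.parameters))             # -> 4: a (w, b) pair per Dense layer
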
--------------------------------------------------------------------------------
/numpygrad/losses.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpygrad.layers import Layer
3 |
4 |
5 | class _Loss(Layer):
6 |
7 | def __init__(self):
8 | super(_Loss, self).__init__()
9 |
10 | def __call__(self, target, prediction, **kwargs):
11 | return super(_Loss, self).__call__(target, prediction, **kwargs)
12 |
13 | def forward(self, target, predicted, **kwargs):
14 | raise NotImplementedError()
15 |
16 |
17 | class MSELoss(_Loss):
18 |
19 | def __init__(self):
20 | super(MSELoss, self).__init__()
21 |
22 | def forward(self, target, predicted, **kwargs):
23 | return ((target - predicted) * (target - predicted)).sum(0)
24 |
25 |
26 | class CrossEntropyLoss(_Loss):
27 |
28 | def __init__(self):
29 | super(CrossEntropyLoss, self).__init__()
30 |
31 |     def forward(self, target, predicted, **kwargs):
32 |         return predicted.cross_entropy(target)
33 |
--------------------------------------------------------------------------------
/numpygrad/optim.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class _Optimizer(object):
4 |
5 | def __init__(self, parameters):
6 | self.parameters = parameters
7 |
8 | def reset(self):
9 | for p in self.parameters:
10 | p.grad.data *= 0
11 |
12 | def step(self, zero_grad=True):
13 | raise NotImplementedError()
14 |
15 |
16 | class SGD(_Optimizer):
17 |
18 | def __init__(self, parameters, lr=0.1):
19 | super(SGD, self).__init__(parameters)
20 | self.lr = lr
21 |
22 | def step(self, zero_grad=True):
23 | for p in self.parameters:
24 | p.data -= p.grad.data * self.lr
25 |
26 | if zero_grad:
27 | p.grad.data *= 0.
28 |
29 |
30 | class Adam(_Optimizer):
31 |
32 | def __init__(self, parameters, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
33 | super(Adam, self).__init__(parameters)
34 | self.lr = lr
35 | self.beta1 = beta1
36 | self.beta2 = beta2
37 | self.epsilon = epsilon
38 |
39 | self._prepare_weights()
40 |
41 | def step(self, zero_grad=True):
42 | self.t += 1
43 |
44 |         for i, p in enumerate(self.parameters):
45 |             grad = p.grad.data
46 |             self.M[i] = self.beta1 * self.M[i] + (1. - self.beta1) * grad
47 |             self.V[i] = self.beta2 * self.V[i] + (1. - self.beta2) * (grad * grad)
48 |             m_hat = self.M[i] / (1. - (self.beta1 ** self.t))
49 |             v_hat = self.V[i] / (1. - (self.beta2 ** self.t))
50 |
51 | p.data -= self.lr * (m_hat / (np.sqrt(v_hat) + self.epsilon))
52 |
53 | if zero_grad:
54 | p.grad.data *= 0.
55 |
56 |
57 | def _prepare_weights(self):
58 | self.M = []
59 | self.V = []
60 | self.t = 0
61 |
62 | for p in self.parameters:
63 |             m = np.zeros_like(p.data)
64 |             v = np.zeros_like(p.data)
65 |
66 | self.M.append(m)
67 | self.V.append(v)
68 |
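A minimal end-to-end training sketch tying the layers, losses and optimizers together. It assumes the linked `Tensor` exposes `backward()` seeded with a ones gradient, as in the Grokking-style autograd this package follows:

    import numpy as np
    from numpygrad.tensor import Tensor
    from numpygrad.layers import Sequential, Dense
    from numpygrad.activations import Tanh
    from numpygrad.losses import MSELoss
    from numpygrad.optim import SGD

    data = Tensor(np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]]), autograd=True)
    target = Tensor(np.array([[0.], [1.], [0.], [1.]]), autograd=True)

    model = Sequential([Dense(2, 3), Tanh(), Dense(3, 1)])
    criterion = MSELoss()
    optimizer = SGD(model.parameters, lr=0.05)

    for epoch in range(100):
        pred = model(data)
        loss = criterion(target, pred)                    # _Loss takes (target, prediction)
        loss.backward(Tensor(np.ones_like(loss.data)))    # assumed Tensor API
        optimizer.step()                                  # zeroes gradients by default
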
--------------------------------------------------------------------------------
/numpygrad/rnn.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpygrad.tensor import Tensor
3 | from numpygrad.layers import Layer, Dense
4 | from numpygrad.activations import Sigmoid, Tanh, ReLU
5 |
6 |
7 | class _RNN(Layer):
8 |
9 | def forward(self, input, states, **kwargs):
10 | return super(_RNN, self).forward(input, states, **kwargs)
11 |
12 |
13 | class RNN(_RNN):
14 |
15 | def __init__(self, n_in, n_hidden, n_out, activation='sigmoid'):
16 | super(RNN, self).__init__()
17 |
18 | self.n_in = n_in
19 | self.n_out = n_out
20 | self.n_hidden = n_hidden
21 |
22 | if activation == 'sigmoid':
23 | self.activation = Sigmoid()
24 | elif activation == 'tanh':
25 | self.activation = Tanh()
26 | else:
27 | self.activation = ReLU()
28 |
29 | self.w_ih = Dense(n_in, n_hidden)
30 | self.w_hh = Dense(n_hidden, n_hidden)
31 | self.w_ho = Dense(n_hidden, n_out)
32 |
33 | def forward(self, input, states, **kwargs):
34 | from_prev_state = self.w_hh(states)
35 | combined = self.w_ih(input) + from_prev_state
36 | new_hidden = self.activation(combined)
37 | output = self.w_ho(new_hidden)
38 |
39 | return output, new_hidden
40 |
41 | def init_state(self, batch_size=1):
42 | return Tensor(np.zeros((batch_size, self.n_hidden)), autograd=True)
43 |
44 |
45 |
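An unrolling sketch for the cell above: call it once per timestep and carry the hidden state forward (again assuming the linked `Tensor` supports the ops `Dense` uses):

    import numpy as np

    cell = RNN(n_in=4, n_hidden=8, n_out=2, activation='tanh')
    state = cell.init_state(batch_size=3)

    for step in range(5):                    # 5 timesteps of random input
        x = np.random.uniform(size=(3, 4))
        output, state = cell(x, state)       # Layer.__call__ wraps x in a Tensor

    print(output.data.shape)                 # -> (3, 2)
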
--------------------------------------------------------------------------------
/temp.py:
--------------------------------------------------------------------------------
1 | from tensorflow.python.keras import layers, models
2 |
3 |
4 | class IncepLayer(layers.Layer):
5 | def __init__(self,filters=32):
6 | super(IncepLayer, self).__init__()
7 | self.filters = filters
8 | self.c1 = layers.Conv2D(filters=self.filters,kernel_size=1,padding='same')
9 | self.c2 = layers.Conv2D(filters=self.filters,kernel_size=1,padding='same')
10 | self.c22 = layers.Conv2D(filters=self.filters,kernel_size=5,padding='same')
11 | self.c3 = layers.Conv2D(filters=self.filters,kernel_size=1,padding='same')
12 | self.c33 = layers.Conv2D(filters=self.filters,kernel_size=7,padding='same')
13 |
14 | def build(self,input_shape):
15 | super(IncepLayer,self).build(input_shape)
16 |
17 | def call(self, inputs):
18 | t1 = self.c1(inputs)
19 | t2 = self.c2(inputs)
20 | t2 = self.c22(t2)
21 | t3 = self.c3(inputs)
22 | t3 = self.c33(t3)
23 |         # all branches use padding='same', so they concatenate cleanly on channels
24 | concat = layers.concatenate([t1,t2,t3])
25 | print(concat.shape)
26 | return concat
27 |
28 | inp = layers.Input((28,28,3))
29 | x = IncepLayer()(inp)
30 |
31 | m = models.Model(inp,x)
32 | m.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
33 | print(m.summary())
--------------------------------------------------------------------------------
/tfdiffeq_examples/data/Query.txt:
--------------------------------------------------------------------------------
1 | 0.0000000e+000 1.1607272e+000 6.9771532e-001 8.3555610e-001 2.1199925e+000 5.0304004e+000 4.1208873e+000 2.6446407e+000 2.8049135e+000 4.0172945e+000 5.2017709e+000 5.2985477e+000 5.1660207e+000 4.4315405e+000 4.0937909e+000 3.7205858e+000 5.4116803e+000 5.3829433e+000 5.3547841e+000 4.6764730e+000 5.9206228e+000 4.1818010e+000 3.8937135e+000 4.5352795e+000 5.5153316e+000 6.1771786e+000 8.0266148e+000 8.6453609e+000 7.2796514e+000 8.6508017e+000 8.2257151e+000 9.0186280e+000 7.9110478e+000 7.3318726e+000 4.3070539e+000 4.8636549e+000 4.4372744e+000 3.2257561e+000 1.3568871e+000 1.4767531e+000 1.5294296e+000 1.3778415e+000 1.2159486e+000 9.8296293e-001 1.9322891e+000 3.7624076e+000 3.4078905e+000 2.5265238e+000 3.3795701e+000 4.1407301e+000 3.6701051e+000 2.7244843e+000 4.0086637e+000 4.2322559e+000 4.3669175e+000 4.6347789e+000 3.7969029e+000 5.2019678e+000 4.4625902e+000 3.1524367e+000 2.8963944e+000 1.4337066e+000 5.3245369e-001 2.3199520e+000 3.0928431e+000 3.3242214e+000 3.4438993e+000 2.6928269e+000 3.0880007e+000 2.1462209e+000 1.9939872e+000 2.3894990e+000 1.3350025e+000 1.7063209e+000 1.6870834e+000 2.5273170e+000 3.5672657e+000 4.7372982e+000 5.6713913e+000 4.7609036e+000 5.0688520e+000 5.5256401e+000 5.4173232e+000 5.1245184e+000 4.8762680e+000 4.9523057e+000 4.6679371e+000 4.4242433e+000 5.4732792e+000 7.1214257e+000 6.1828741e+000 5.5284021e+000 5.6836399e+000 5.1284313e+000 4.5864579e+000 5.2905810e+000 4.9792833e+000 5.8320053e+000 5.2457346e+000 5.3751458e+000 5.9151743e+000 5.7831998e+000 5.3904723e+000 4.7916757e+000 4.7282919e+000 4.3119695e+000 5.0939671e+000 4.5073311e+000 6.1770480e+000 4.8770526e+000 6.1181828e+000 6.1325291e+000 6.7434167e+000 6.1307852e+000 5.2699132e+000 5.0656451e+000 5.4072741e+000 6.4834902e+000 6.2307137e+000 5.9636110e+000 5.9715284e+000 6.8216158e+000 7.8845506e+000 9.4805290e+000 1.0023738e+001 1.0222443e+001 1.0780917e+001 1.0082370e+001
2 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/jump_reduce_tf.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 | from tfdiffeq import odeint
10 | tf.enable_eager_execution()
11 |
12 | parser = argparse.ArgumentParser('ODE demo')
13 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
14 | parser.add_argument('--data_size', type=int, default=2000)
15 | parser.add_argument('--rtol', type=float, default=1e-7)
16 | parser.add_argument('--atol', type=float, default=1e-9)
17 | parser.add_argument('--viz', action='store_true')
18 | parser.add_argument('--gpu', type=int, default=0)
19 | # parser.add_argument('--adjoint', type=eval, default=False)
20 | parser.set_defaults(viz=True)
21 | args = parser.parse_args()
22 |
23 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
24 |
25 | true_y0 = tf.convert_to_tensor(1, dtype=tf.float64)
26 | t_n = np.linspace(0, 100., num=args.data_size)
27 | t = tf.convert_to_tensor(t_n, dtype=tf.float32)
28 |
29 |
30 | class Lambda(tf.keras.Model):
31 |
32 | def call(self, t, y):
33 |         dydt = tf.exp(-tf.cast(t, y.dtype) * y) / y
34 | return dydt
35 |
36 | with tf.device(device):
37 | t1 = time.time()
38 | pred_y = odeint(Lambda(), true_y0, t, rtol=args.rtol, atol=args.atol, method=args.method)
39 | t2 = time.time()
40 |
41 | print("Number of solutions : ", pred_y.shape)
42 | print("Time taken : ", t2 - t1)
43 |
44 | plt.plot(t_n, pred_y.numpy(), 'r-', label='x')
45 | # plt.plot(time, pred_y.numpy(), 'b--', label='y')
46 | plt.legend()
47 | plt.show()
48 |
49 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/jump_reduce_torch.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import torch
9 | from torchdiffeq import odeint
10 |
11 | parser = argparse.ArgumentParser('ODE demo')
12 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
13 | parser.add_argument('--data_size', type=int, default=2000)
14 | parser.add_argument('--rtol', type=float, default=1e-3)
15 | parser.add_argument('--atol', type=float, default=1e-3)
16 | parser.add_argument('--viz', action='store_true')
17 | parser.add_argument('--gpu', type=int, default=0)
18 | # parser.add_argument('--adjoint', type=eval, default=False)
19 | parser.set_defaults(viz=True)
20 | args = parser.parse_args()
21 |
22 | torch.set_default_dtype(torch.float64)
23 | device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
24 |
25 | true_y0 = torch.tensor(1,).float().to(device)
26 | t_n = np.linspace(0, 100., num=args.data_size)
27 | t = torch.tensor(t_n).to(device)
28 |
29 |
30 | class Lambda(torch.nn.Module):
31 |
32 | def forward(self, t, y):
33 | dydt = -t * y + 1 / y
34 | return dydt
35 |
36 | t1 = time.time()
37 | pred_y = odeint(Lambda(), true_y0, t, rtol=args.rtol, atol=args.atol, method=args.method)
38 | t2 = time.time()
39 |
40 | print("Number of solutions : ", pred_y.shape)
41 | print("Time taken : ", t2 - t1)
42 |
43 | plt.plot(t_n, pred_y.cpu().numpy(), 'r-', label='x')
44 | # plt.plot(time, pred_y.numpy(), 'b--', label='y')
45 | plt.legend()
46 | plt.show()
47 |
48 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/lorentz_attractor.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | from scipy.integrate import odeint
4 | from mpl_toolkits.mplot3d import Axes3D
5 |
6 | rho = 28.0
7 | sigma = 10.0
8 | beta = 8.0 / 3.0
9 |
10 | def f(state, t):
11 | x, y, z = state # unpack the state vector
12 | return sigma * (y - x), x * (rho - z) - y, x * y - beta * z # derivatives
13 |
14 | state0 = [1.0, 1.0, 1.0]
15 | t = np.arange(0.0, 100.0, 0.01)
16 |
17 | states = odeint(f, state0, t)
18 |
19 | fig = plt.figure()
20 | ax = fig.gca(projection='3d')
21 | ax.plot(states[:,0], states[:,1], states[:,2])
22 | plt.show()
23 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/regression.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tfdiffeq import odeint
4 | from tfdiffeq.models.dense_odenet import ODENet
5 | import matplotlib.pyplot as plt
6 |
7 | from tfdiffeq_examples.utils.data_loader import load_dataset
8 |
9 | tf.compat.v2.enable_v2_behavior()
10 |
11 | X_train, y_train, X_test, y_test = load_dataset('adiac', normalize_timeseries=True)
12 | print()
13 |
14 | data_dim = X_train.shape[-1]
15 |
16 | model = ODENet(data_dim, hidden_dim=1, output_dim=data_dim, augment_dim=1,
17 | non_linearity='linear', time_dependent=True, tol=1e-3)
18 |
19 | optimizer = tf.train.AdamOptimizer(1e-2)
20 | criterion = tf.keras.losses.MeanAbsoluteError()
21 |
22 | BATCH_SIZE = 128
23 | EPOCHS = 100
24 |
25 | X_train = tf.constant(X_train)
26 | y_train = tf.constant(y_train)
27 | X_test = tf.constant(X_test)
28 | y_test = tf.constant(y_test)
29 | global_step = tf.Variable(0, dtype=tf.int64, trainable=False)
30 |
31 | for epoch in range(EPOCHS):
32 | with tf.GradientTape() as tape:
33 | outputs = model(X_train)
34 | loss = criterion(y_train, outputs)
35 |
36 | grads = tape.gradient(loss, model.trainable_variables)
37 | optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
38 |
39 | print("Epoch %d: Loss = %0.5f" % (epoch + 1, loss.numpy().mean()))
40 |
41 | x = X_test[0]
42 | x_pred = model(tf.expand_dims(x, 0))
43 |
44 | plt.plot(x, label='original')
45 | plt.plot(x_pred[0], label='generated', alpha=0.5)
46 | plt.legend()
47 | plt.show()
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/spiral_odes.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 |
7 | import tensorflow as tf
8 |
9 | tf.enable_eager_execution()
10 |
11 | parser = argparse.ArgumentParser('ODE demo')
12 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
13 | parser.add_argument('--data_size', type=int, default=1000)
14 | parser.add_argument('--batch_time', type=int, default=10)
15 | parser.add_argument('--batch_size', type=int, default=20)
16 | parser.add_argument('--niters', type=int, default=2000)
17 | parser.add_argument('--test_freq', type=int, default=20)
18 | parser.add_argument('--viz', action='store_true', default=True)
19 | parser.add_argument('--gpu', type=int, default=0)
20 | args = parser.parse_args()
21 | from tfdiffeq import odeint
22 |
23 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
24 |
25 | true_y0 = tf.convert_to_tensor([[0.5, 0.01]])
26 | t = tf.linspace(0., 25., args.data_size)
27 | true_A = tf.convert_to_tensor([[-0.1, 3.0], [-3.0, -0.1]], dtype=tf.float64)
28 |
29 |
30 | class Lambda(tf.keras.Model):
31 |
32 | def call(self, t, y):
33 | y = tf.cast(y, tf.float64)
34 | return tf.matmul(y, true_A)
35 |
36 |
37 | with tf.device(device):
38 | t1 = time.time()
39 | true_y = odeint(Lambda(), true_y0, t, method=args.method)
40 | t2 = time.time()
41 | print(true_y)
42 | print()
43 | print("Time taken to compute solution : ", t2 - t1)
44 |
45 |
46 | def get_batch():
47 | s = np.random.choice(
48 | np.arange(args.data_size - args.batch_time,
49 | dtype=np.int64), args.batch_size,
50 | replace=False)
51 |
52 | temp_y = true_y.numpy()
53 | batch_y0 = tf.convert_to_tensor(temp_y[s]) # (M, D)
54 | batch_t = t[:args.batch_time] # (T)
55 | batch_y = tf.stack([temp_y[s + i] for i in range(args.batch_time)], axis=0) # (T, M, D)
56 | return batch_y0, batch_t, batch_y
57 |
58 |
59 | def makedirs(dirname):
60 | if not os.path.exists(dirname):
61 | os.makedirs(dirname)
62 |
63 |
64 | if args.viz:
65 | makedirs('png')
66 | import matplotlib.pyplot as plt
67 | fig = plt.figure(figsize=(12, 4), facecolor='white')
68 | ax_traj = fig.add_subplot(131, frameon=False)
69 | ax_phase = fig.add_subplot(132, frameon=False)
70 | ax_vecfield = fig.add_subplot(133, frameon=False)
71 | plt.show(block=False)
72 |
73 |
74 | def visualize(true_y, pred_y, odefunc, itr):
75 |
76 | if args.viz:
77 |
78 | max_y, min_y = true_y.numpy().max(), true_y.numpy().min()
79 |
80 | ax_traj.cla()
81 | ax_traj.set_title('Trajectories')
82 | ax_traj.set_xlabel('t')
83 | ax_traj.set_ylabel('x,y')
84 | ax_traj.plot(t.numpy(), true_y.numpy()[:, 0, 0], t.numpy(), true_y.numpy()[:, 0, 1], 'g-', label='True trajectories')
85 | ax_traj.plot(t.numpy(), pred_y.numpy()[:, 0, 0], '--', t.numpy(), pred_y.numpy()[:, 0, 1], 'b--', label='Predicted Trajectories')
86 | ax_traj.set_xlim(min(t.numpy()), max(t.numpy()))
87 | ax_traj.set_ylim(min_y, max_y)
88 | ax_traj.legend()
89 |
90 | ax_phase.cla()
91 | ax_phase.set_title('Phase Portrait')
92 | ax_phase.set_xlabel('x')
93 | ax_phase.set_ylabel('y')
94 | ax_phase.plot(true_y.numpy()[:, 0, 0], true_y.numpy()[:, 0, 1], 'g-')
95 | ax_phase.plot(pred_y.numpy()[:, 0, 0], pred_y.numpy()[:, 0, 1], 'b--')
96 | ax_phase.set_xlim(min_y, max_y)
97 | ax_phase.set_ylim(min_y, max_y)
98 |
99 | ax_vecfield.cla()
100 | ax_vecfield.set_title('Learned Vector Field')
101 | ax_vecfield.set_xlabel('x')
102 | ax_vecfield.set_ylabel('y')
103 |
104 | y, x = np.mgrid[min_y:max_y:21j, min_y:max_y:21j]
105 | dydt = odefunc(0, tf.convert_to_tensor(np.stack([x, y], -1).reshape(21 * 21, 2))).numpy()
106 | mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
107 | dydt = (dydt / mag)
108 | dydt = dydt.reshape(21, 21, 2)
109 |
110 | ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
111 | ax_vecfield.set_xlim(min_y, max_y)
112 | ax_vecfield.set_ylim(min_y, max_y)
113 |
114 | fig.tight_layout()
115 | plt.savefig('png/{:03d}'.format(itr))
116 | plt.draw()
117 | plt.pause(0.001)
118 |
119 |
120 | class ODEFunc(tf.keras.Model):
121 |
122 | def __init__(self, **kwargs):
123 | super(ODEFunc, self).__init__(**kwargs)
124 |
125 | self.x = tf.keras.layers.Dense(50, activation='tanh',
126 | kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.1))
127 | self.y = tf.keras.layers.Dense(2,
128 | kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.1))
129 |
130 | def call(self, t, y):
131 | y = tf.cast(y, tf.float32)
132 | x = self.x(y)
133 | y = self.y(x)
134 | return y
135 |
136 |
137 | class RunningAverageMeter(object):
138 | """Computes and stores the average and current value"""
139 |
140 | def __init__(self, momentum=0.99):
141 | self.momentum = momentum
142 | self.reset()
143 |
144 | def reset(self):
145 | self.val = None
146 | self.avg = 0
147 |
148 | def update(self, val):
149 | if self.val is None:
150 | self.avg = val
151 | else:
152 | self.avg = self.avg * self.momentum + val * (1 - self.momentum)
153 | self.val = val
154 |
155 |
156 | if __name__ == '__main__':
157 |
158 | ii = 0
159 | end = time.time()
160 |
161 | time_meter = RunningAverageMeter(0.97)
162 | loss_meter = RunningAverageMeter(0.97)
163 |
164 | with tf.device(device):
165 | func = ODEFunc()
166 |
167 | lr = 1e-3
168 | optimizer = tf.train.RMSPropOptimizer(lr)
169 |
170 | for itr in range(1, args.niters + 1):
171 |
172 | with tf.GradientTape() as tape:
173 | batch_y0, batch_t, batch_y = get_batch()
174 | pred_y = odeint(func, batch_y0, batch_t)
175 | loss = tf.reduce_mean(tf.abs(pred_y - batch_y))
176 |
177 | grads = tape.gradient(loss, func.variables)
178 | grad_vars = zip(grads, func.variables)
179 |
180 | optimizer.apply_gradients(grad_vars)
181 |
182 | time_meter.update(time.time() - end)
183 | loss_meter.update(loss.numpy())
184 |
185 | if itr % args.test_freq == 0:
186 | pred_y = odeint(func, true_y0, t)
187 | loss = tf.reduce_mean(tf.abs(pred_y - true_y))
188 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.numpy()))
189 | visualize(true_y, pred_y, func, ii)
190 | ii += 1
191 |
192 | end = time.time()
193 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/temp/plot_tf.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 | from tfdiffeq import odeint
10 |
11 | tf.enable_eager_execution()
12 |
13 | parser = argparse.ArgumentParser('ODE demo')
14 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
15 | parser.add_argument('--data_size', type=int, default=2000)
16 | parser.add_argument('--rtol', type=float, default=1e-3)
17 | parser.add_argument('--atol', type=float, default=1e-4)
18 | parser.add_argument('--viz', action='store_true')
19 | parser.add_argument('--gpu', type=int, default=0)
20 | # parser.add_argument('--adjoint', type=eval, default=False)
21 | parser.set_defaults(viz=True)
22 | args = parser.parse_args()
23 |
24 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
25 |
26 | true_y0 = tf.convert_to_tensor([[1, -1]], dtype=tf.float64)
27 | t_n = np.linspace(-2, 1, num=args.data_size)
28 | t = tf.convert_to_tensor(t_n, dtype=tf.float32)
29 |
30 | true_A = tf.convert_to_tensor([[1, -0.2], [-0.2, 1]], dtype=tf.float64)
31 |
32 | class Lambda(tf.keras.Model):
33 |
34 | def call(self, t, y):
35 | dydt = tf.matmul(y, true_A)
36 | return dydt
37 |
38 |
39 | with tf.device(device):
40 | t1 = time.time()
41 | pred_y = odeint(Lambda(), true_y0, t, rtol=args.rtol, atol=args.atol, method=args.method)
42 | t2 = time.time()
43 |
44 | print("Number of solutions : ", pred_y.shape)
45 | print("Time taken : ", t2 - t1)
46 |
47 | pred_y = pred_y.numpy()
48 |
49 | plt.plot(t_n, pred_y[:, 0, 0], t_n, pred_y[:, 0, 1], 'r-', label='trajectory')
50 | # plt.plot(time, pred_y.numpy(), 'b--', label='y')
51 | plt.legend()
52 | plt.xlabel('time')
53 | plt.ylabel('magnitude')
54 | plt.show()
55 |
56 | plt.plot(pred_y[:, 0, 0], pred_y[:, 0, 1], 'b-', label='phase')
57 | plt.legend()
58 | plt.xlabel('x')
59 | plt.ylabel('y')
60 | plt.show()
61 |
62 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/temp/temp1.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 | from tfdiffeq import odeint
10 | tf.enable_eager_execution()
11 |
12 | parser = argparse.ArgumentParser('ODE demo')
13 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
14 | parser.add_argument('--data_size', type=int, default=20)
15 | parser.add_argument('--batch_time', type=int, default=10)
16 | parser.add_argument('--batch_size', type=int, default=20)
17 | parser.add_argument('--niters', type=int, default=2000)
18 | parser.add_argument('--test_freq', type=int, default=20)
19 | parser.add_argument('--viz', action='store_true')
20 | parser.add_argument('--gpu', type=int, default=0)
21 | # parser.add_argument('--adjoint', type=eval, default=False)
22 | parser.set_defaults(viz=True)
23 | args = parser.parse_args()
24 |
25 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
26 |
27 | true_y0 = tf.convert_to_tensor(1, dtype=tf.float64)
28 | time = np.linspace(0, 35., num=args.data_size)
29 | t = tf.convert_to_tensor(time, dtype=tf.float32)
30 | # true_A = tf.convert_to_tensor([[-0.1, 2.0], [-2.0, -0.1]], dtype=tf.float64)
31 |
32 |
33 | def true_y(t, y0):
34 |     # rough reference curve for the MSE comparison below (exact closed form left commented out)
35 | return t + y0 # 0.2 * np.exp(5 * t + 5 * y0) + 3. / 5.
36 |
37 |
38 | class Lambda(tf.keras.Model):
39 |
40 | def call(self, t, y):
41 | if t < 10.:
42 | u = 0.0
43 | else:
44 | u = 2.0
45 |
46 | out = (-y + u) / 5.0
47 | return out
48 |
49 |
50 | real_y = [true_y(t, true_y0.numpy()) for t in time]
51 | pred_y = odeint(Lambda(), true_y0, t, method=args.method)
52 |
53 | mse = np.mean(np.square(real_y - pred_y.numpy()))
54 | print("MSE :", mse)
55 |
56 | for i, (real, pred) in enumerate(zip(real_y, pred_y.numpy())):
57 | print(i + 1, real, pred)
58 |
59 | plt.plot(pred_y.numpy(), label='integral')
60 | plt.legend()
61 | plt.show()
62 |
63 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/temp/temp2.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 | from tfdiffeq import odeint
10 | tf.enable_eager_execution()
11 |
12 | parser = argparse.ArgumentParser('ODE demo')
13 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
14 | parser.add_argument('--data_size', type=int, default=20)
15 | parser.add_argument('--batch_time', type=int, default=10)
16 | parser.add_argument('--batch_size', type=int, default=20)
17 | parser.add_argument('--niters', type=int, default=2000)
18 | parser.add_argument('--test_freq', type=int, default=20)
19 | parser.add_argument('--viz', action='store_true')
20 | parser.add_argument('--gpu', type=int, default=0)
21 | # parser.add_argument('--adjoint', type=eval, default=False)
22 | parser.set_defaults(viz=True)
23 | args = parser.parse_args()
24 |
25 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
26 |
27 | true_y0 = tf.convert_to_tensor([0, 0], dtype=tf.float64)
28 | time = np.linspace(0, 35., num=args.data_size)
29 | t = tf.convert_to_tensor(time, dtype=tf.float32)
30 |
31 |
32 | class Lambda(tf.keras.Model):
33 |
34 | def call(self, t, y):
35 |         dxdt = 3 * tf.exp(-tf.cast(t, y.dtype))
36 |         dydt = 3 - y[1]
37 |         return tf.stack([dxdt, dydt])
38 |
39 |
40 | pred_y = odeint(Lambda(), true_y0, t, method=args.method)
41 |
42 | print("Number of solutions : ", pred_y.shape)
43 |
44 | plt.plot(time, pred_y[:, 0].numpy(), 'r-', label='x')
45 | plt.plot(time, pred_y[:, 1].numpy(), 'b--', label='y')
46 | plt.legend()
47 | plt.show()
48 |
49 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/temp/temp3.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 | from tfdiffeq import odeint
10 | tf.enable_eager_execution()
11 |
12 | parser = argparse.ArgumentParser('ODE demo')
13 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
14 | parser.add_argument('--data_size', type=int, default=2000)
15 | parser.add_argument('--batch_time', type=int, default=10)
16 | parser.add_argument('--batch_size', type=int, default=20)
17 | parser.add_argument('--niters', type=int, default=2000)
18 | parser.add_argument('--test_freq', type=int, default=20)
19 | parser.add_argument('--viz', action='store_true')
20 | parser.add_argument('--gpu', type=int, default=0)
21 | # parser.add_argument('--adjoint', type=eval, default=False)
22 | parser.set_defaults(viz=True)
23 | args = parser.parse_args()
24 |
25 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
26 |
27 | true_y0 = tf.convert_to_tensor(1, dtype=tf.float64)
28 | time = np.linspace(0, 10000., num=args.data_size)
29 | t = tf.convert_to_tensor(time, dtype=tf.float32)
30 |
31 |
32 | class Lambda(tf.keras.Model):
33 |
34 | def call(self, t, y):
35 | dydt = 1. / (y - true_y0 + 1e-3)
36 | return dydt
37 |
38 |
39 | pred_y = odeint(Lambda(), true_y0, t, method=args.method)
40 |
41 | print("Number of solutions : ", pred_y.shape)
42 |
43 | plt.plot(time, pred_y.numpy(), 'r-', label='x')
44 | # plt.plot(time, pred_y.numpy(), 'b--', label='y')
45 | plt.legend()
46 | plt.show()
47 |
48 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/temp/temp4.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 | from tfdiffeq import odeint
10 | tf.enable_eager_execution()
11 |
12 | parser = argparse.ArgumentParser('ODE demo')
13 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
14 | parser.add_argument('--data_size', type=int, default=2000)
15 | parser.add_argument('--batch_time', type=int, default=10)
16 | parser.add_argument('--batch_size', type=int, default=20)
17 | parser.add_argument('--niters', type=int, default=2000)
18 | parser.add_argument('--test_freq', type=int, default=20)
19 | parser.add_argument('--viz', action='store_true')
20 | parser.add_argument('--gpu', type=int, default=0)
21 | # parser.add_argument('--adjoint', type=eval, default=False)
22 | parser.set_defaults(viz=True)
23 | args = parser.parse_args()
24 |
25 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
26 |
27 | true_y0 = tf.convert_to_tensor(1, dtype=tf.float64)
28 | time = np.linspace(0, 1., num=args.data_size)
29 | t = tf.convert_to_tensor(time, dtype=tf.float32)
30 |
31 |
32 | def true_val(t, y):
33 | return 0.2 * np.exp(t) * np.exp(5 * y) + 0.6
34 |
35 |
36 | class Lambda(tf.keras.Model):
37 |
38 | def call(self, t, y):
39 | dydt = 5 * y - 3
40 | return dydt
41 |
42 |
43 | real_y = [true_val(t, true_y0.numpy()) for t in time]
44 | pred_y = odeint(Lambda(), true_y0, t, method=args.method)
45 |
46 | mse = np.mean(np.square(real_y - pred_y.numpy()))
47 | print('MSE : ', mse)
48 | print("Number of solutions : ", pred_y.shape)
49 |
50 | plt.plot(time, real_y, label='real')
51 | plt.plot(time, pred_y.numpy(), label='pred')
52 | plt.legend()
53 | plt.show()
54 |
55 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/temp/temp5.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | import os
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 | import tensorflow as tf
9 | from tfdiffeq import odeint
10 | tf.enable_eager_execution()
11 |
12 | parser = argparse.ArgumentParser('ODE demo')
13 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
14 | parser.add_argument('--data_size', type=int, default=2000)
15 | parser.add_argument('--batch_time', type=int, default=10)
16 | parser.add_argument('--batch_size', type=int, default=20)
17 | parser.add_argument('--niters', type=int, default=2000)
18 | parser.add_argument('--test_freq', type=int, default=20)
19 | parser.add_argument('--viz', action='store_true')
20 | parser.add_argument('--gpu', type=int, default=0)
21 | # parser.add_argument('--adjoint', type=eval, default=False)
22 | parser.set_defaults(viz=True)
23 | args = parser.parse_args()
24 |
25 | device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
26 |
27 | true_y0 = tf.convert_to_tensor(1, dtype=tf.float64)
28 | time = np.linspace(0, 100., num=args.data_size)
29 | t = tf.convert_to_tensor(time, dtype=tf.float32)
30 |
31 |
32 | class Lambda(tf.keras.Model):
33 |
34 | def call(self, t, y):
35 | dydt = 1 / (y * y)
36 | return dydt
37 |
38 | with tf.device(device):
39 | pred_y = odeint(Lambda(), true_y0, t, method=args.method)
40 |
41 | print("Number of solutions : ", pred_y.shape)
42 |
43 | plt.plot(time, pred_y.numpy(), 'r-', label='x')
44 | # plt.plot(time, pred_y.numpy(), 'b--', label='y')
45 | plt.legend()
46 | plt.show()
47 |
48 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/utils/data_loader.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 |
4 | import os
5 |
6 |
7 | def load_dataset(dataset_name, normalize_timeseries=False, verbose=True):
8 | train_path = 'data/' + dataset_name + '_TRAIN'
9 | test_path = 'data/' + dataset_name + '_TEST'
10 |
11 | if verbose: print("Loading train / test dataset : ", train_path, test_path)
12 |
13 | if os.path.exists(train_path):
14 | df = pd.read_csv(train_path, header=None, encoding='latin-1')
15 | else:
16 | raise FileNotFoundError('File %s not found!' % (train_path))
17 |
18 | # remove all columns which are completely empty
19 | df.dropna(axis=1, how='all', inplace=True)
20 |
21 | # fill all missing columns with 0
22 | df.fillna(0, inplace=True)
23 |
24 | # extract labels Y and normalize to [0 - (MAX - 1)] range
25 | y_train = df[[0]].values
26 | nb_classes = len(np.unique(y_train))
27 | y_train = (y_train - y_train.min()) / (y_train.max() - y_train.min()) * (nb_classes - 1)
28 |
29 | # drop labels column from train set X
30 | df.drop(df.columns[0], axis=1, inplace=True)
31 |
32 | X_train = df.values
33 |
34 | # scale the values
35 | if normalize_timeseries:
36 | X_train_mean = X_train.mean(axis=-1, keepdims=True)
37 | X_train_std = X_train.std(axis=-1, keepdims=True)
38 | X_train = (X_train - X_train_mean) / (X_train_std + 1e-8)
39 |
40 | if verbose: print("Finished loading train dataset..")
41 |
42 | if os.path.exists(test_path):
43 | df = pd.read_csv(test_path, header=None, encoding='latin-1')
44 | else:
45 | raise FileNotFoundError('File %s not found!' % (test_path))
46 |
47 | # remove all columns which are completely empty
48 | df.dropna(axis=1, how='all', inplace=True)
49 |
50 | # fill all missing columns with 0
51 | df.fillna(0, inplace=True)
52 |
53 | # extract labels Y and normalize to [0 - (MAX - 1)] range
54 | y_test = df[[0]].values
55 | nb_classes = len(np.unique(y_test))
56 | y_test = (y_test - y_test.min()) / (y_test.max() - y_test.min()) * (nb_classes - 1)
57 |
58 | # drop labels column from train set X
59 | df.drop(df.columns[0], axis=1, inplace=True)
60 |
61 | X_test = df.values
62 |
63 | # scale the values
64 | if normalize_timeseries:
65 | X_test_mean = X_test.mean(axis=-1, keepdims=True)
66 | X_test_std = X_test.std(axis=-1, keepdims=True)
67 | X_test = (X_test - X_test_mean) / (X_test_std + 1e-8)
68 |
69 | if verbose:
70 | print("Finished loading test dataset..")
71 | print()
72 | print("Number of train samples : ", X_train.shape[0], "Number of test samples : ", X_test.shape[0])
73 | print("Number of classes : ", nb_classes)
74 | print("Sequence length : ", X_train.shape[-1])
75 |
76 |
77 | return X_train, y_train, X_test, y_test
78 |
79 |
80 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/utils/extract_ucr_datasets.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import pandas as pd
4 | from joblib import Parallel, delayed
5 |
6 | path = '_data'
7 |
8 | if not os.path.exists(path):
9 | os.makedirs(path)
10 |
11 |
12 | def process_file(fn):
13 | file_name = os.path.split(fn)[-1]
14 | file_name = file_name[:-4]
15 | new_path = os.path.join(path, file_name)
16 |
17 |     # Load the tab-separated values in the dataset
18 | df = pd.read_table(fn, header=None, encoding='latin-1')
19 |
20 | # Fill the empty timesteps with 0.0
21 | df.fillna(0.0, inplace=True)
22 |
23 | # Save the prepared dataset as a CSV file that the dataset reader can use
24 | df.to_csv(new_path, sep=',', index=False, header=None, encoding='latin-1')
25 |
26 | # shutil.copy(fn, new_path)
27 | print("Copied file from %s to %s" % (fn, new_path))
28 |
29 |
30 | with Parallel(n_jobs=-1, backend='loky', verbose=1) as engine:
31 | engine([delayed(process_file)(fn) for fn in glob.glob("*/*.tsv")])
32 |
33 | print()
34 | print("Extracted all files. Transfer all these files to the `data` directory")
35 |
--------------------------------------------------------------------------------
/tfdiffeq_examples/utils/progbar.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 |
4 |
5 |
6 | # Modified from https://github.com/markdregan/K-Nearest-Neighbors-with-Dynamic-Time-Warping
7 | class _ProgressBar(object):
8 |
9 | def __init__(self, iterations, animation_interval=0.5):
10 | self.iterations = iterations
11 | self.start = time.time()
12 | self.last = 0
13 | self.animation_interval = animation_interval
14 |
15 | def percentage(self, i):
16 | return 100 * i / float(self.iterations)
17 |
18 | def animate(self, i, e):
19 | pass
20 |
21 | def update(self, i):
22 | elapsed = time.time() - self.start
23 | i = i + 1
24 |
25 | if elapsed - self.last > self.animation_interval:
26 | self.animate(i + 1, elapsed)
27 | self.last = elapsed
28 | elif i == self.iterations:
29 | self.animate(i, elapsed)
30 |
31 |
32 | # Modified from https://github.com/markdregan/K-Nearest-Neighbors-with-Dynamic-Time-Warping
33 | class _TextProgressBar(_ProgressBar):
34 |
35 | def __init__(self, iterations, printer):
36 | self.fill_char = '-'
37 | self.width = 40
38 | self.printer = printer
39 |
40 | _ProgressBar.__init__(self, iterations)
41 | self.update(0)
42 |
43 | def animate(self, i, elapsed):
44 | self.printer(self.progbar(i, elapsed))
45 |
46 | def progbar(self, i, elapsed):
47 | bar = self.bar(self.percentage(i))
48 | return "[%s] %i of %i complete in %.1f sec" % (
49 | bar, i, self.iterations, round(elapsed, 1))
50 |
51 | def bar(self, percent):
52 | all_full = self.width - 2
53 | num_hashes = int(percent / 100 * all_full)
54 |
55 | bar = self.fill_char * num_hashes + ' ' * (all_full - num_hashes)
56 |
57 | info = '%d%%' % percent
58 | loc = (len(bar) - len(info)) // 2
59 | return replace_at(bar, info, loc, loc + len(info))
60 |
61 |
62 | def replace_at(s, new, start, stop):
63 |     return s[:start] + new + s[stop:]
64 |
65 |
66 | def consoleprint(s):
67 |     if sys.platform.startswith('win'):
68 |         print(s, '\r', end='')
69 |     else:
70 |         print(s)
71 |
72 |
73 | def progress_bar(iters):
74 | return _TextProgressBar(iters, consoleprint)
75 |
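A usage sketch driving the bar through a dummy loop; `update` throttles redraws to one per `animation_interval` seconds:

    if __name__ == '__main__':
        bar = progress_bar(100)
        for i in range(100):
            time.sleep(0.01)                 # stand-in for real work
            bar.update(i)
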
--------------------------------------------------------------------------------
/wumpus_agent/agent.py:
--------------------------------------------------------------------------------
1 | from .memory import ExperienceReplay
2 | import numpy as np
3 | import os
4 |
5 | initial_state = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.33', '1.0', '1.0', '1.0', '1.0', '0.0', '0.0', '1.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '0.0', '0.0', '1.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0']
6 |
7 | class Agent:
8 |
9 | def __init__(self, model, memory=None, memory_size=1000):
10 | assert len(model.output_shape) == 2, "Model's output shape should be (nb_samples, nb_actions)."
11 |
12 | if memory:
13 | self.memory = memory
14 | else:
15 | self.memory = ExperienceReplay(memory_size)
16 |
17 | self.model = model
18 | self.frames = None
19 |
20 | @property
21 | def memory_size(self):
22 | return self.memory.memory_size
23 |
24 | @memory_size.setter
25 | def memory_size(self, value):
26 | self.memory.memory_size = value
27 |
28 | def reset_memory(self):
29 | self.memory.reset_memory()
30 |
31 |     def get_game_data(self, game):
32 |         # assumes the Game-like object exposes get_state() and get_score(),
33 |         # matching how `train` and `play` below query the game
34 |         return np.expand_dims(game.get_state(), 0), game.get_score()
35 |
36 | def clear_frames(self):
37 | self.frames = None
38 |
39 |     def train(self, game, nb_actions, nb_epoch=1000, batch_size=50, gamma=0.9, epsilon=[1., .1], epsilon_rate=0.5, reset_memory=False, observe=0, checkpoint=None):
40 | if type(epsilon) in {tuple, list}:
41 | delta = ((epsilon[0] - epsilon[1]) / (nb_epoch * epsilon_rate))
42 | final_epsilon = epsilon[1]
43 | epsilon = epsilon[0]
44 | else:
45 | final_epsilon = epsilon
46 |
47 | model = self.model
48 | nb_actions = model.output_shape[-1]
49 | win_count = 0
50 |
51 | for epoch in range(nb_epoch):
52 | loss = 0.
53 | self.clear_frames()
54 |
55 | if reset_memory:
56 | self.reset_memory()
57 |
58 | game_over = False
59 |             S, r = self.get_game_data(game)
60 |
61 | while not game_over:
62 | if np.random.random() < epsilon or epoch < observe:
63 | a = int(np.random.randint(nb_actions))
64 | else:
65 | q = model.predict(S)
66 | a = int(np.argmax(q[0]))
67 |
68 |                 game.play(a)
69 |
70 |                 r = game.get_score()
71 |                 S_prime, _ = self.get_game_data(game)
72 | game_over = game.is_over()
73 | transition = [S, a, r, S_prime, game_over]
74 | self.memory.remember(*transition)
75 | S = S_prime
76 | if epoch >= observe:
77 | batch = self.memory.get_batch(model=model, batch_size=batch_size, gamma=gamma)
78 | if batch:
79 | inputs, targets = batch
80 | loss += float(model.train_on_batch(inputs, targets))
81 | if checkpoint and ((epoch + 1 - observe) % checkpoint == 0 or epoch + 1 == nb_epoch):
82 | model.save_weights('weights.dat')
83 | if game.is_won():
84 | win_count += 1
85 | if epsilon > final_epsilon and epoch >= observe:
86 | epsilon -= delta
87 | print("Epoch {:03d}/{:03d} | Loss {:.4f} | Epsilon {:.2f} | Win count {}".format(epoch + 1, nb_epoch, loss, epsilon, win_count))
88 |
89 | def play(self, game, nb_epoch=10, epsilon=0., visualize=True):
90 |         assert self.model.output_shape[-1] == game.nb_actions, "Model output size must match game.nb_actions"
91 | model = self.model
92 | win_count = 0
93 | frames = []
94 | for epoch in range(nb_epoch):
95 | game.reset()
96 | self.clear_frames()
97 |             S, _ = self.get_game_data(game)
98 | if visualize:
99 | frames.append(game.draw())
100 | game_over = False
101 | while not game_over:
102 | if np.random.rand() < epsilon:
103 | print("random")
104 | action = int(np.random.randint(0, game.nb_actions))
105 | else:
106 | q = model.predict(S)[0]
107 | possible_actions = game.get_possible_actions()
108 | q = [q[i] for i in possible_actions]
109 | action = possible_actions[np.argmax(q)]
110 | game.play(action)
111 |                 S, _ = self.get_game_data(game)
112 | if visualize:
113 | frames.append(game.draw())
114 | game_over = game.is_over()
115 | if game.is_won():
116 | win_count += 1
117 | print("Accuracy {} %".format(100. * win_count / nb_epoch))
118 |
119 |
--------------------------------------------------------------------------------
/wumpus_agent/exec.py:
--------------------------------------------------------------------------------
1 | '''
2 | Author : Somshubra Majumdar
3 | Date : 25-Jan-17
4 |
5 | Trains and tests an agent on the Wumpus World environment for a
6 | configurable number of iterations (default 10,000), wherein each
7 | iteration plays one game. Prints the loss, epsilon and win count
8 | after every game.
9 |
10 | '''
11 |
12 | from __future__ import absolute_import
13 | from __future__ import print_function
14 | from __future__ import division
15 |
16 | import asyncio
17 | import sys
18 | from asyncio.subprocess import PIPE, STDOUT
19 |
20 | import argparse
21 | import os
22 | import numpy as np
23 |
24 | from memory import ExperienceReplay
25 | from model import build_model
26 |
27 | assert os.path.exists("WorldApplication.class"), "WorldApplication.class not found. Aborting. Read the instructions before using exec.py"
28 |
29 | parser = argparse.ArgumentParser('Wumpus World statistics')
30 | parser.add_argument('-i', default=10000, type=int, help='Number of iterations to test Wumpus world')
31 |
32 | parser.add_argument('-d', default='4', type=str, help='Sets the dimensions of the Wumpus World to be dimension x dimension. Default: 4 (a 4x4 world)')
33 | parser.add_argument('-s', default='50', type=str, help='Sets the maximum number of time steps. Default: 50')
34 | parser.add_argument('-t', default='1', type=str, help='Sets the number of trials. Default: 10000')
35 | parser.add_argument('-a', default="false", type=str, help="Sets whether the agent's location and orientation is randomly generated. Default: false")
36 | parser.add_argument('-r', default=-1, type=str, help='Sets the seed for the random Wumpus World generator. Default: (random integer)')
37 | parser.add_argument('-f', default='wumpus_out.txt', type=str, help='Sets the filename for the output file (containing the terminal output). Default: wumpus_out.txt')
38 | parser.add_argument('-n', default="false", type=str, help="Sets whether the agent's GO_FORWARD action behavior is non-deterministic. Default: false")
39 | parser.add_argument('-p', default="false", type=str, help='Sets whether the terminal will print out the environment along with the actions. When off, will still print the final score. Default: false')
40 |
41 | args = parser.parse_args()
42 |
43 | iterations = args.i # number of iterations to check
44 |
45 | param_args = ["java", "WorldApplication", "-d", args.d, "-s", args.s, "-t", args.t, "-a", args.a, "-f", args.f, "-n",
46 | args.n, "-p", args.p]
47 |
48 | if args.r != -1:
49 | param_args.append("-r")
50 | param_args.append(args.r)
51 |
52 | print("Testing Wumpus world environment %d times" % iterations, "\n", "*" * 60, "\n")
53 |
54 | out_path = r"wumpus_out.txt"
55 |
56 | scores = []
57 | FNULL = open(os.devnull, 'w') # Prevent java code output on screen
58 |
59 | ''' Constants '''
60 | nb_actions = 6
61 | memory_size = 100
62 | observe = 0
63 | batch_size = 50
64 |
65 | epsilon = (1.0, 0.1)
66 | epsilon_rate = 0.5
67 |
68 | delta = ((epsilon[0] - epsilon[1]) / (iterations * epsilon_rate))
69 | final_epsilon = epsilon[1]
70 | epsilon = epsilon[0]
71 |
72 | win_count = 0
73 |
74 | ''' Memory and Model '''
75 | memory = ExperienceReplay(memory_size)
76 | model = build_model()
77 |
78 | ''' Agent Code '''
79 | initial_state = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.33', '1.0', '1.0', '1.0', '1.0', '0.0', '0.0', '1.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '0.0', '0.0', '1.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0', '1.0', '1.0', '0.0']
80 |
81 | if sys.platform == "win32":
82 | loop = asyncio.ProactorEventLoop() # for subprocess' pipes on Windows
83 | asyncio.set_event_loop(loop)
84 | else:
85 | loop = asyncio.get_event_loop()
86 |
87 | async def run_loop():
88 | global epsilon, win_count
89 |
90 | print("Beginning new game iteration : ", step + 1)
91 | loss = 0.
92 | game_over = 0
93 | final_score = 0
94 |     S = np.asarray(initial_state, dtype=np.float32)[np.newaxis]
95 |
96 | # p = Popen(param_args, stdin=PIPE, stdout=PIPE)
97 |
98 | p = await asyncio.create_subprocess_exec(*param_args, stdin=PIPE, stdout=PIPE)
99 |
100 | for _ in range(10): # skip first 10 lines
101 | await asyncio.wait_for(p.stdout.readline(), timeout=2)
102 |
103 | curr_score = 0
104 |
105 | for i in range(50): # Play for 50 game steps
106 | if np.random.random() < epsilon or i < observe:
107 | a = int(np.random.randint(nb_actions))
108 | else:
109 |             a = int(model.predict_classes(S)[0])
110 |
111 | action = str(a + 1) + "\n" # ArgMax returns in range of [0-5], whereas actions are [1-6]
112 |
113 | if "[" in action:
114 | action = action[1:2]
115 |
116 | # print("Action : ", action)
117 |
118 | p.stdin.write(bytes(action, encoding='utf-8')) # Perform action
119 | #try:
120 | # p.stdin.flush()
121 | #except OSError:
122 | # print('**warning** : Failed to flush')
123 |
124 | # result = str(p.stdout.readline(), 'utf-8') # Get new state
125 | # curr_score = str(p.stdout.readline(), 'utf-8') # Get new reward
126 |
127 | try:
128 | result = await asyncio.wait_for(p.stdout.readline(), timeout=1)
129 | result = result[:-4]
130 | except asyncio.TimeoutError:
131 | print("Timeout error, breaking.")
132 | break
133 |
134 | try:
135 | curr_score = await asyncio.wait_for(p.stdout.readline(), timeout=1)
136 | except asyncio.TimeoutError:
137 | print("Timeout error, breaking.")
138 | break
139 |
140 | result = str(result, 'utf-8')
141 |
142 | ''' Game End Criteria '''
143 | try:
144 | curr_score = float(curr_score)
145 | except ValueError:
146 | curr_score = str(curr_score, 'utf-8')
147 | game_over = 1
148 |
149 | if 'Average Score: ' in result:
150 | result = result.replace('Average Score: ', '')
151 | final_score = float(result)
152 | game_over = 1
153 |
154 | if not isinstance(curr_score, float):
155 | if 'Average Score: ' in curr_score:
156 | curr_score = curr_score.replace('Average Score: ', '')
157 | final_score = float(curr_score)
158 | game_over = 1
159 |
160 | if result == 'Finished.':
161 | game_over = 1
162 |
163 | ''' Updates '''
164 |
165 |         S_prime = result.split(' ')
166 |         S_prime[-1] = S_prime[-1].replace('\r\n', '')
167 |         S_prime = S if game_over else np.asarray(S_prime, dtype=np.float32)[np.newaxis]
168 |
169 | r = curr_score
170 |
171 | memory.remember(S, a, r, S_prime, game_over)
172 | S = S_prime
173 |
174 | if i >= observe:
175 | batch = memory.get_batch(model=model, batch_size=batch_size, gamma=0.9)
176 | if batch:
177 | inputs, targets = batch
178 | loss += float(model.train_on_batch(inputs, targets))
179 |
180 | model.save_weights('dnn.h5', overwrite=True)
181 |
182 | if game_over:
183 | break
184 |
185 | print('Final Score :', final_score)
186 | if isinstance(final_score, float):
187 |         if final_score > 0:  # assume the game was won if the final score is positive
188 | win_count += 1
189 |
190 | if epsilon > final_epsilon and step >= observe:
191 | epsilon -= delta
192 |
193 | print("Epoch {:03d}/{:03d} | Loss {:.4f} | Epsilon {:.2f} | Win count {}".format(step + 1, iterations, loss, epsilon,
194 | win_count))
195 |
196 | for step in range(iterations):
197 | loop.run_until_complete(run_loop())
198 |
199 | loop.close()
200 | print('Finished.')
--------------------------------------------------------------------------------
/wumpus_agent/memory.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from random import sample
3 | from keras import backend as K
4 |
5 | class Memory:
6 |
7 | def __init__(self):
8 | pass
9 |
10 | def remember(self, S, a, r, S_prime, game_over):
11 | pass
12 |
13 | def get_batch(self, model, batch_size):
14 | pass
15 |
16 |
17 | class ExperienceReplay(Memory):
18 |
19 | def __init__(self, memory_size=100, fast=False):
20 | super(ExperienceReplay, self).__init__()
21 |
22 | self.fast = fast
23 | self.memory = []
24 | self._memory_size = memory_size
25 |
26 | def remember(self, s, a, r, s_prime, game_over):
27 | self.input_shape = s.shape[1:]
28 | self.memory.append(np.concatenate([s.flatten(), np.array(a).flatten(), np.array(r).flatten(), s_prime.flatten(), 1 * np.array(game_over).flatten()]))
29 | if self.memory_size > 0 and len(self.memory) > self.memory_size:
30 | self.memory.pop(0)
31 |
32 | def get_batch(self, model, batch_size, gamma=0.9):
33 | if self.fast:
34 | return self.get_batch_fast(model, batch_size, gamma)
35 | if len(self.memory) < batch_size:
36 | batch_size = len(self.memory)
37 |
38 | nb_actions = model.output_shape[-1]
39 |
40 | samples = np.array(sample(self.memory, batch_size))
41 | input_dim = np.prod(self.input_shape)
42 |
43 | S = samples[:, :input_dim]
44 | a = samples[:, input_dim]
45 | r = samples[:, input_dim + 1]
46 |
47 | S_prime = samples[:, input_dim + 2 : 2 * input_dim + 2]
48 | game_over = samples[:, 2 * input_dim + 2]
49 | r = r.repeat(nb_actions).reshape((batch_size, nb_actions))
50 | game_over = game_over.repeat(nb_actions).reshape((batch_size, nb_actions))
51 |
52 | S = S.reshape((batch_size, ) + self.input_shape)
53 | S_prime = S_prime.reshape((batch_size, ) + self.input_shape)
54 |
55 | X = np.concatenate([S, S_prime], axis=0)
56 | Y = model.predict(X)
57 |
58 | Qsa = np.max(Y[batch_size:], axis=1).repeat(nb_actions).reshape((batch_size, nb_actions))
59 |
60 | delta = np.zeros((batch_size, nb_actions))
61 |         a = a.astype(int)
62 | delta[np.arange(batch_size), a] = 1
63 |
64 | targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
65 | #targets = targets.astype(np.float32)
66 | return S, targets
67 |
68 | @property
69 | def memory_size(self):
70 | return self._memory_size
71 |
72 | @memory_size.setter
73 | def memory_size(self, value):
74 | if value > 0 and value < self._memory_size:
75 | self.memory = self.memory[:value]
76 | self._memory_size = value
77 |
78 | def reset_memory(self):
79 | self.memory = []
80 |
81 | def set_batch_function(self, model, input_shape, batch_size, nb_actions, gamma):
82 | input_dim = np.prod(input_shape)
83 | samples = K.placeholder(shape=(batch_size, input_dim * 2 + 3))
84 |
85 | S = samples[:, 0 : input_dim]
86 | a = samples[:, input_dim]
87 |         a = K.cast(a, 'int32')
88 | r = samples[:, input_dim + 1]
89 | S_prime = samples[:, input_dim + 2 : 2 * input_dim + 2]
90 | game_over = samples[:, 2 * input_dim + 2 : 2 * input_dim + 3]
91 |
92 | r = K.reshape(r, (batch_size, 1))
93 | r = K.repeat(r, nb_actions)
94 | r = K.reshape(r, (batch_size, nb_actions))
95 |
96 | game_over = K.repeat(game_over, nb_actions)
97 | game_over = K.reshape(game_over, (batch_size, nb_actions))
98 |
99 | S = K.reshape(S, (batch_size, ) + input_shape)
100 | S_prime = K.reshape(S_prime, (batch_size, ) + input_shape)
101 |
102 | X = K.concatenate([S, S_prime], axis=0)
103 | Y = model(X)
104 |
105 | Qsa = K.max(Y[batch_size:], axis=1)
106 | Qsa = K.reshape(Qsa, (batch_size, 1))
107 | Qsa = K.repeat(Qsa, nb_actions)
108 | Qsa = K.reshape(Qsa, (batch_size, nb_actions))
109 |
110 | delta = K.reshape(self.one_hot(a, nb_actions), (batch_size, nb_actions))
111 | targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
112 |
113 | self.batch_function = K.function(inputs=[samples], outputs=[S, targets])
114 |
115 | def one_hot(self, seq, num_classes):
116 | return K.one_hot(seq, num_classes) #K.equal(K.reshape(seq, (-1, 1)), K.arange(num_classes))
117 |
118 | def get_batch_fast(self, model, batch_size, gamma):
119 | if len(self.memory) < batch_size:
120 | return None
121 |
122 | samples = np.array(sample(self.memory, batch_size))
123 |
124 | if not hasattr(self, 'batch_function'):
125 | self.set_batch_function(model, self.input_shape, batch_size, model.output_shape[-1], gamma)
126 |
127 | S, targets = self.batch_function([samples])
128 | targets = targets.astype(np.float32)
129 | return S, targets
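
For reference, `get_batch` builds standard Q-learning targets: for the action actually taken, target = r + gamma * (1 - game_over) * max_a' Q(s', a'); every other action keeps the model's current prediction (the `(1 - delta)` term). A tiny self-contained numeric sketch of that update:

    import numpy as np

    gamma = 0.9
    q_s = np.array([0.2, 0.5, 0.1])          # current Q(s, .) from the model
    q_s_prime_max = 0.7                      # max_a' Q(s', a')
    a, r, done = 1, 1.0, 0                   # action taken, reward, non-terminal

    target = q_s.copy()
    target[a] = r + gamma * (1 - done) * q_s_prime_max
    print(target)                            # -> [0.2, 1.63, 0.1]
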
--------------------------------------------------------------------------------
/wumpus_agent/model.py:
--------------------------------------------------------------------------------
1 |
2 | from keras.models import Sequential
3 | from keras.layers import Dense
4 | from keras.optimizers import Adam
5 |
6 | data_members = 57
7 | nb_actions = 6
8 | hidden_size = 100
9 |
10 | def build_model():
11 | model = Sequential()
12 | model.add(Dense(hidden_size, activation='relu', input_shape=(data_members,)))
13 | model.add(Dense(hidden_size, activation='relu'))
14 | model.add(Dense(nb_actions))
15 | model.compile(Adam(lr=1e-3), "diff")
16 |
17 | return model
18 |
19 |
20 |
21 |
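A quick sanity-check sketch for the model above: score one random 57-float state vector and map the argmax back to the 1-6 action numbering used by exec.py:

    import numpy as np

    model = build_model()
    state = np.random.uniform(size=(1, data_members))
    q_values = model.predict(state)          # shape (1, nb_actions)
    print(int(q_values.argmax()) + 1)        # actions are numbered 1-6 in exec.py
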
--------------------------------------------------------------------------------