├── .gitignore
├── Classification
│   ├── Logistic Regression.ipynb
│   ├── cost.jpg
│   ├── derc.jpg
│   └── hyp.jpg
├── Decision Tree
│   ├── Explanations.ipynb
│   ├── funcs.py
│   └── img
│       ├── entropy.png
│       └── venn_dia.png
├── Dimensionality Reduction
│   └── Principal Component Analysis (PCA)
│       ├── Principal Component Analysis.ipynb
│       ├── img
│       │   ├── covariance.png
│       │   └── svd.png
│       └── visualization.py
├── LICENSE
├── README.md
├── Regression
│   └── Linear Regressions.ipynb
└── _config.yml

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 | 
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 | 
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 | 
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 | 
50 | # Translations
51 | *.mo
52 | *.pot
53 | 
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 | 
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 | 
63 | # Scrapy stuff:
64 | .scrapy
65 | 
66 | # Sphinx documentation
67 | docs/_build/
68 | 
69 | # PyBuilder
70 | target/
71 | 
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 | 
75 | # pyenv
76 | .python-version
77 | 
78 | # celery beat schedule file
79 | celerybeat-schedule
80 | 
81 | # SageMath parsed files
82 | *.sage.py
83 | 
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 | 
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 | 
97 | # Rope project settings
98 | .ropeproject
99 | 
100 | # mkdocs documentation
101 | /site
102 | 
103 | # mypy
104 | .mypy_cache/
105 | 
--------------------------------------------------------------------------------
/Classification/cost.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arkady-A/Exploring-Machine-Learning/4e740bec813d869f87dbe25cc8d653edd7edce65/Classification/cost.jpg
--------------------------------------------------------------------------------
/Classification/derc.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arkady-A/Exploring-Machine-Learning/4e740bec813d869f87dbe25cc8d653edd7edce65/Classification/derc.jpg
--------------------------------------------------------------------------------
/Classification/hyp.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arkady-A/Exploring-Machine-Learning/4e740bec813d869f87dbe25cc8d653edd7edce65/Classification/hyp.jpg
--------------------------------------------------------------------------------
/Decision Tree/funcs.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue Oct 23 17:36:35 2018
4 | 
5 | @author: ark4d
6 | """
7 | import numpy as np
8 | 
9 | 
10 | class Probability():
11 |     '''
12 |     This class contains methods to work with probability
13 |     '''
14 |     @staticmethod
15 |     def get_distr(data, un_values=[]):
16 |         '''
17 |         Creates an array with the probability of each value occurring
18 |         Parameters
19 |         ----------
20 |         data : numpy-array(int)
21 |             data whose value probabilities will be computed
22 |         un_values : array-like(int), optional
23 |             unique values for which the probabilities will be computed
24 |         Returns
25 |         ----------
26 |         numpy-array(float)
27 |             probability of occurrence for each value
28 |         '''
29 |         if len(un_values)==0:
30 |             un_values=np.unique(data)
31 |         values, count = [],[]
32 |         for value in un_values:
33 |             values.append(value)
34 |             count.append(len(data[data==value]))
35 |         dist = np.array([values, np.array(count)/data.shape[0]])
36 |         return dist
37 | 
38 | 
39 |     @staticmethod
40 |     def dist_table(data1, data2):
41 |         '''
42 |         Creates a table that represents the joint probability of two distributions
43 | 
44 |         Parameters
45 |         ----------
46 |         data1 : numpy-array (int)
47 |             data
48 |         data2 : numpy-array (int)
49 |             data
50 | 
51 |         Returns
52 |         ----------
53 |         list
54 |             unique values of data1
55 |         list
56 |             unique values of data2
57 |         numpy matrix
58 |             joint probability of the two distributions
59 | 
60 |         Raises
61 |         ----------
62 |         ValueError
63 |             if `data1` shape is not equal to `data2` shape
64 |         '''
65 |         if data1.shape == data2.shape:
66 |             size = data1.shape[0]
67 |             un_x_values = np.unique(data1)
68 |             un_y_values = np.unique(data2)
69 |             table = np.zeros((un_x_values.shape[0], un_y_values.shape[0]))
70 |             for row in np.transpose(np.array([data1,data2])):
71 |                 table[
72 |                     np.where(un_x_values==row[0]),
73 |                     np.where(un_y_values==row[1])
74 |                     ]+=1
75 |             return un_x_values, un_y_values, table/size
76 |         else:
77 |             raise ValueError('{} != {}'.format(data1.shape, data2.shape))
78 | 
79 | 
80 | class Itheory():
81 |     @staticmethod
82 |     # note: if you check this function against the one in scipy.stats
83 |     # you will get different values, because scipy calculates entropy
84 |     # with base e instead of base 2. The values differ, but they express
85 |     # the same thing: the scipy function returns entropy measured in
86 |     # nats, whereas the function beneath returns bits (shannons)
87 |     # of entropy.
88 |     def entropy(pmf):
89 |         '''
90 |         Calculates entropy
91 | 
92 |         Parameters
93 |         ----------
94 |         pmf : numpy-array (float)
95 |             Probability of each element occurring
96 | 
97 |         Returns
98 |         ----------
99 |         float
100 |             entropy
101 |         '''
102 |         entrp = -(pmf*np.nan_to_num(np.log2(pmf),0)).sum()
103 |         return entrp
104 | 
105 |     @staticmethod
106 |     def joint_entropy(table):
107 |         '''
108 |         Calculates the joint entropy of 2 distributions
109 | 
110 |         Parameters
111 |         ----------
112 |         table : numpy matrix
113 |             Joint probability distribution matrix (can be generated
114 |             by the `dist_table` function)
115 | 
116 |         Returns
117 |         ----------
118 |         float
119 |             joint entropy
120 |         '''
121 |         j_ent = np.nan_to_num(table*np.log2(table),0).sum()
122 |         return -j_ent
123 | 
124 |     @staticmethod
125 |     def conditional_entropy(table, axis_marginal):
126 |         '''
127 |         Calculates the conditional entropy of 2 distributions
128 | 
129 |         Parameters
130 |         ----------
131 |         table : numpy matrix
132 |             Joint probability distribution matrix (can be generated
133 |             by the `dist_table` function)
134 | 
135 |         axis_marginal : int, 1 or 0
136 |             Determines which variable the entropy is conditioned on
137 |             0 - H(`data1`|`data2`)
138 |             1 - H(`data2`|`data1`)
139 |             look in `dist_table` for the definition of `data1` and `data2`
140 |         Returns
141 |         ----------
142 |         float
143 |             conditional entropy
144 |         '''
145 |         # informal: a relatively hard function to follow.
146 |         marg = np.apply_along_axis(lambda x: x.sum(),axis_marginal,table)
147 |         table=np.apply_along_axis(lambda x: x*marg**-1,1-axis_marginal,table)
148 |         table=np.apply_along_axis(lambda x: Itheory.entropy(x),axis_marginal,table)
149 |         table = np.nan_to_num(table,0)
150 |         c_ent = np.matmul(marg, table)
151 |         return c_ent
152 | 
153 |     @staticmethod
154 |     def rel_entropy(prob_dist1, prob_dist2):
155 |         '''
156 |         Calculates relative entropy (KL divergence)
157 | 
158 |         Parameters
159 |         ----------
160 |         prob_dist1 : numpy-array
161 |             marginal probability of a distribution
162 |         prob_dist2 : numpy-array
163 |             marginal probability of a distribution
164 | 
165 |         Returns
166 |         ----------
167 |         float
168 |             relative entropy
169 |         '''
170 |         div = prob_dist1/prob_dist2
171 |         if np.isinf(div).any():
172 |             return np.inf
173 |         else:
174 |             log = np.log2(div)
175 |             log[np.isinf(log)]=0
176 |             log[np.isnan(log)]=0
177 |             rel_entropy = np.sum(prob_dist1*log)
178 |             return rel_entropy
179 | 
180 |     @staticmethod
181 |     def mutual_information(prob_dist1, prob_dist2, table):
182 |         '''
183 |         Calculates the mutual information between 2 distributions
184 | 
185 |         Parameters
186 |         ----------
187 |         prob_dist1 : numpy-array
188 |             marginal probability of a distribution
189 |         prob_dist2 : numpy-array
190 |             marginal probability of a distribution
191 |         table: numpy-matrix
192 |             Joint probability distribution matrix (can be generated
193 |             by the `dist_table` function)
194 | 
195 |         Returns
196 |         ----------
197 |         float
198 |             Mutual information
199 |         '''
200 |         div = table/(prob_dist1.reshape(-1,1)* prob_dist2)
201 |         if np.isinf(div).any():
202 |             return np.inf
203 |         else:
204 |             log = np.log2(div)
205 |             log[np.isinf(log)]=0
206 |             log[np.isnan(log)]=0
207 |             mutual_inf = np.sum(table*log)
208 |             return mutual_inf
209 | 
210 | # TO-DO: Add docs to the Utility class.
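The classes above are easier to follow with a concrete call sequence. The snippet below is a minimal usage sketch, not part of funcs.py; it assumes the module is importable as `funcs` (run from the "Decision Tree" directory), and the two sample arrays are made up for illustration.

import numpy as np
from funcs import Probability, Itheory

x = np.array([0, 0, 0, 1, 1, 2])
y = np.array([0, 1, 1, 1, 2, 2])

# get_distr returns a 2-row array: row 0 holds the unique values,
# row 1 holds their probabilities.
px = Probability.get_distr(x)[1]
py = Probability.get_distr(y)[1]

# Joint probability table of x and y (rows follow x values, columns follow y values).
ux, uy, joint = Probability.dist_table(x, y)

print(Itheory.entropy(px))                        # H(X) in bits
# scipy.stats.entropy(px) gives the same quantity in nats (divide by np.log(2) for bits)
print(Itheory.joint_entropy(joint))               # H(X, Y)
print(Itheory.conditional_entropy(joint, 0))      # H(X | Y), per the docstring convention
print(Itheory.mutual_information(px, py, joint))  # I(X; Y)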
211 | class Utility():
212 | 
213 |     @staticmethod
214 |     def make_ax_look_good(ax):
215 |         '''
216 |         Makes axes look fancy :)
217 | 
218 |         Parameters
219 |         ----------
220 |         ax : matplotlib axes
221 |         '''
222 |         ax.grid(alpha=0.2)
223 |         for spine in ax.spines:
224 |             ax.spines[spine].set_visible(False)
225 | 
226 |     @staticmethod
227 |     def less_ent_d(s, step_m=2, low_boundary=4, times=2):
228 |         '''
229 |         Changes a distribution such that its entropy decreases.
230 |         '''
231 |         mult = 2
232 |         s_m = s.mean().round().astype('int')
233 |         vals = np.array(np.unique(s,return_counts=True))
234 |         vals_gtlw = (vals[0,vals[1,:]>low_boundary])
235 |         vals_gtlw = vals_gtlw[(vals_gtlw<s_m)|(vals_gtlw>s_m)]
236 |         np.set_printoptions(threshold=np.nan)
237 |         for i in range(0,times):
238 |             for indx, val in enumerate(vals_gtlw[vals_gtlw<s_m]):
239 |                 # shift a third of the samples below the mean up by step_m
240 |                 step_arr=np.zeros(shape=(len(s[s==val]),),dtype='int')
241 |                 step_arr[:int(len(s[s==val])/3)]=step_m
242 |                 s[s==val]+=step_arr
243 | 
244 |             for indx, val in enumerate(vals_gtlw[vals_gtlw>s_m]):
245 |                 step_arr=np.zeros(shape=(len(s[s==val]),),dtype='int')
246 |                 step_arr[:int(len(s[s==val])/3)]=-step_m
247 |                 s[s==val]+=step_arr
248 |             mult+=1
249 | 
250 |         return s
251 | 
252 |     @staticmethod
253 |     def less_ent(dist, multiplier=0.01,steps=2, low_border=0.004):
254 |         '''
255 |         Changes a distribution such that its entropy decreases.
256 |         '''
257 |         size = len(dist)
258 |         s_indx = np.argmax(dist!=0)
259 |         e_indx = np.argmax(np.flip(dist)!=0)
260 |         m_indx_d = np.round(((size-(s_indx+e_indx))/2)).astype('int')
261 |         m_indx = m_indx_d+s_indx
262 |         # print(s_indx, -e_indx, m_indx)
263 |         for step in range(0,steps):
264 |             for i in range(s_indx,m_indx):
265 |                 distance = np.abs(m_indx - i)
266 |                 distance_coef = distance/m_indx_d
267 |                 buff = dist[i]*distance_coef* multiplier
268 |                 if (dist[i]-buff)<(low_border) and dist[i]>=low_border:
269 |                     buff = (dist[i]-low_border)
270 |                 dist[i], dist[i+1] = dist[i]-buff, dist[i+1]+buff
271 | 
272 |             for i in range(size-1-e_indx, m_indx,-1):
273 |                 distance = np.abs(m_indx - i)
274 |                 distance_coef = distance/m_indx_d
275 |                 buff = dist[i]*distance_coef* multiplier
276 |                 if (dist[i]-buff)<(low_border) and dist[i]>=low_border:
277 |                     buff = (dist[i]-low_border)
278 |                 dist[i], dist[i-1] = dist[i]-buff, dist[i-1]+buff
279 |         return dist
280 | 
281 |     # this function shuffles the values of a probability distribution
282 |     # dist: a probability distribution
283 |     # times: greater means more shuffling
284 |     # multiplier: greater means more shuffling
285 |     @staticmethod
286 |     def shuffle_dist(dist, times=10, multiplier=0.4):
287 |         '''
288 |         Shuffles the values of a distribution. How much it changes depends on the passed parameters.
289 | 
290 |         Parameters
291 |         ----------
292 |         dist : nparray
293 |             distribution of a random variable
294 |         times : int
295 |             number of times the process is repeated
296 |         multiplier : float
297 |             determines how much the function changes the distribution on each iteration
298 | 
299 |         Returns
300 |         ----------
301 |         dist : nparray
302 |             changed distribution
303 |         '''
304 |         deck = dist[dist!=0]
305 |         start_index = np.argmax(dist!=0)
306 |         end_index = start_index+deck.shape[0]
307 |         buff=0
308 |         for i in range(0, times):
309 |             for i in range(start_index, end_index):
310 |                 if np.random.choice([True,False]):
311 |                     buff, dist[i] = dist[i]*multiplier, (dist[i]-dist[i]*multiplier)+buff
312 |                     dist[np.random.randint(start_index, end_index)]+=buff
313 |                     buff=0
314 |         return dist
315 | 
316 |     @staticmethod
317 |     def _decide(chance):
318 |         if np.random.rand()<chance:
319 |             return True
320 |         return False

--------------------------------------------------------------------------------
82 |     "If you don't understand what is $$\\frac{\\text{d}}{\\text{d}\\theta_n}J$$\n",
83 |     "I would recommend [this site](https://www.khanacademy.org/math/differential-calculus); once you reach the topic \"Product, quotient, & chain rules\" you should have enough background to understand the formula and what it's doing.\n",
84 |     " \n",
85 |     " \n",
86 |     " - - - \n",
87 |     "You can read more in these books:\n",
88 |     " 1. Sarkar D., Bali R., Sharma T. - Practical Machine Learning with Python (page №263)\n",
89 |     " 2. S. Raschka - Python Machine Learning (page №89)\n"
90 |    ]
91 |   },
92 |   {
93 |    "cell_type": "code",
94 |    "execution_count": 11,
95 |    "metadata": {},
96 |    "outputs": [],
97 |    "source": [
98 |     "def gradient_descent(X,y,thetas,alpha,num_of_iterations): \n",
99 |     "    Xn = np.insert(X,0,1,axis=1) # inserts the x0 parameter [1]\n",
100 |     "    clr=0\n",
101 |     "    for itera in range(0,num_of_iterations):\n",
102 |     "        new_theta = [] # thetas need to be updated simultaneously\n",
103 |     "        for idx, theta in enumerate(thetas): # for every theta\n",
104 |     "            calc=np.sum((y-func_l(Xn,thetas))*Xn[:,idx]) # derivative of the cost function with respect to this theta\n",
105 |     "            new_theta.append(theta+alpha*calc) \n",
106 |     "        \n",
107 |     "        # only for visualization\n",
108 |     "        # ------------------------\n",
109 |     "        if itera%(num_of_iterations/6)==0: \n",
110 |     "            clr+=1\n",
111 |     "            plt.plot(X,\n",
112 |     "                     func_l(X,thetas),\n",
113 |     "                     color=[1.0-clr/10,0+clr/10,0])\n",
114 |     "        # ------------------------\n",
115 |     "        thetas = np.array(new_theta)\n",
116 |     "    return thetas"
117 |    ]
118 |   },
119 |   {
120 |    "cell_type": "markdown",
121 |    "metadata": {},
122 |    "source": [
123 |     "## Linear function\n",
124 |     "\n",
125 |     "Equation of a line: \n",
126 |     "$$y = kx +b$$ \n",
127 |     "where $k$ is the slope of the line, which describes both its direction and its steepness. \n",
128 |     "The equation of a line can have multiple coefficients: \n",
129 |     "\n",
130 |     "$$y = \\theta_0[2]+\\theta_1X_1+...+\\theta_nX_n$$\n",
131 |     "\n",
132 |     "So by changing the thetas we change the slope of the line. \n",
133 |     "$\\theta_0$ plays the role of $b$ (the bias): it shifts the graph up or down."
134 |    ]
135 |   },
136 |   {
137 |    "cell_type": "code",
138 |    "execution_count": 36,
139 |    "metadata": {},
140 |    "outputs": [],
141 |    "source": [
142 |     "def func_l(X,thetas):\n",
143 |     "    func = np.sum(np.multiply(X,thetas[:]),axis=1)\n",
144 |     "    return func"
145 |    ]
146 |   },
147 |   {
148 |    "cell_type": "markdown",
149 |    "metadata": {},
150 |    "source": [
151 |     "## Start function\n",
152 |     "The function below plots the data points and runs the functions defined above."
153 |    ]
154 |   },
155 |   {
156 |    "cell_type": "code",
157 |    "execution_count": 37,
158 |    "metadata": {},
159 |    "outputs": [],
160 |    "source": [
161 |     "def start(X, y, thetas, alpha, iteration_count):\n",
162 |     "    plt.figure(figsize = [10,10])\n",
163 |     "    plt.scatter(X, y)\n",
164 |     "    plt.plot(X, func_l(X,thetas), label=\"start linear\", color=\"red\")\n",
165 |     "    print(\"before training {}\".format(cost_function(X, y, thetas)))\n",
166 |     "    new_thetas = gradient_descent(X, y, thetas, alpha, iteration_count)\n",
167 |     "    print(\"after training {}\".format(cost_function(X, y, new_thetas)))\n",
168 |     "    plt.plot(X, func_l(X, new_thetas), label=\"after training\", color=\"green\")\n",
169 |     "    plt.legend()\n",
170 |     "    plt.show()"
171 |    ]
172 |   },
173 |   {
174 |    "cell_type": "markdown",
175 |    "metadata": {},
176 |    "source": [
177 |     "## Settings section\n",
178 |     "In the cell below you can change the parameters and watch how they affect gradient descent."
179 |    ]
180 |   },
181 |   {
182 |    "cell_type": "code",
183 |    "execution_count": 60,
184 |    "metadata": {},
185 |    "outputs": [],
186 |    "source": [
187 |     "parameters = { \n",
188 |     "    \"alpha\": 0.0001, #default 0.0001\n",
189 |     "    \"iteration_count\":600, #default 600 (if you change it, make sure it is divisible by 6)\n",
190 |     "}"
191 |    ]
192 |   },
193 |   {
194 |    "cell_type": "markdown",
195 |    "metadata": {},
196 |    "source": [
197 |     "## Start section\n",
198 |     "The cell below runs the algorithm."
199 |    ]
200 |   },
201 |   {
202 |    "cell_type": "code",
203 |    "execution_count": 61,
204 |    "metadata": {
205 |     "scrolled": false
206 |    },
207 |    "outputs": [
[... interactive matplotlib (nbagg) figure output (JavaScript) omitted ...]
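Since the notebook excerpt above is truncated (the imports and the cell defining `cost_function` are not included), here is a compact, self-contained sketch of the same batch update the notebook's `gradient_descent` applies per theta, theta <- theta + alpha * sum((y - h(x)) * x), on synthetic data. This is an editor's illustration rather than a cell from the notebook; `cost_function` below is a plain squared-error stand-in, and the data is made up.

import numpy as np

def func_l(Xn, thetas):
    # hypothesis: each row of Xn (with the bias column) dotted with the parameter vector
    return Xn @ thetas

def cost_function(X, y, thetas):
    # stand-in squared-error cost; the notebook's own definition is not shown in this excerpt
    Xn = np.insert(X, 0, 1, axis=1)
    err = y - func_l(Xn, thetas)
    return (err ** 2).mean() / 2

def gradient_descent(X, y, thetas, alpha, num_of_iterations):
    Xn = np.insert(X, 0, 1, axis=1)              # add the x0 = 1 bias column
    for _ in range(num_of_iterations):
        err = y - func_l(Xn, thetas)             # residuals under the current thetas
        thetas = thetas + alpha * (Xn.T @ err)   # simultaneous update of all thetas
    return thetas

# synthetic points scattered around the line y = 3 + 2x
rng = np.random.default_rng(0)
X = rng.uniform(0, 10, size=(100, 1))
y = 3 + 2 * X[:, 0] + rng.normal(0, 1, size=100)

thetas = np.zeros(2)
print("cost before training:", cost_function(X, y, thetas))
thetas = gradient_descent(X, y, thetas, alpha=0.0001, num_of_iterations=600)
print("cost after training:", cost_function(X, y, thetas))
print("fitted thetas:", thetas)

With the default alpha of 0.0001 and 600 iterations the cost drops sharply, mirroring the "before training" / "after training" printout of the notebook's start function.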