├── .DS_Store
├── Figures
│   ├── Central_Dogma_Statistics.png
│   ├── Comparing_Models.png
│   ├── DL.jpeg
│   ├── DecisionTree.png
│   ├── KNN.png
│   ├── ROC.png
│   ├── TF_DAG.png
│   ├── biasvariance.png
│   ├── ensembling.png
│   ├── heaps.png
│   ├── manhattan_euclidean.png
│   ├── pca.png
│   ├── power-law.png
│   ├── sql_joins.jpg
│   ├── sql_joins2.png
│   ├── svm.jpeg
│   └── svm.png
├── README.md
├── Screenshots
│   ├── .DS_Store
│   ├── screenshot1.png
│   └── screenshot2.png
├── data-science-cheatsheet.pdf
└── data_science_cheatsheet.tex
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/.DS_Store
--------------------------------------------------------------------------------
/Figures/Central_Dogma_Statistics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/Central_Dogma_Statistics.png
--------------------------------------------------------------------------------
/Figures/Comparing_Models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/Comparing_Models.png
--------------------------------------------------------------------------------
/Figures/DL.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/DL.jpeg
--------------------------------------------------------------------------------
/Figures/DecisionTree.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/DecisionTree.png
--------------------------------------------------------------------------------
/Figures/KNN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/KNN.png
--------------------------------------------------------------------------------
/Figures/ROC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/ROC.png
--------------------------------------------------------------------------------
/Figures/TF_DAG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/TF_DAG.png
--------------------------------------------------------------------------------
/Figures/biasvariance.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/biasvariance.png
--------------------------------------------------------------------------------
/Figures/ensembling.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/ensembling.png
--------------------------------------------------------------------------------
/Figures/heaps.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/heaps.png
--------------------------------------------------------------------------------
/Figures/manhattan_euclidean.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/manhattan_euclidean.png
--------------------------------------------------------------------------------
/Figures/pca.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/pca.png
--------------------------------------------------------------------------------
/Figures/power-law.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/power-law.png
--------------------------------------------------------------------------------
/Figures/sql_joins.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/sql_joins.jpg
--------------------------------------------------------------------------------
/Figures/sql_joins2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/sql_joins2.png
--------------------------------------------------------------------------------
/Figures/svm.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/svm.jpeg
--------------------------------------------------------------------------------
/Figures/svm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Figures/svm.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | **Update (2019-12-18)**: The *Data Science Cheatsheet* has evolved into a book! Check out *Cracking the Data Science Interview* [here](https://www.amazon.com/dp/171068013X/ref=sr_1_8?keywords=cracking+the+data+science+interview&qid=1576688426&sr=8-8)! This also means that the Cheatsheet will be getting a makeover soon, so stay tuned!
3 |
4 | # Data Science Cheatsheet
5 | This cheatsheet is currently a 9-page reference covering basic concepts in probability, statistics, statistical learning, machine learning, big data frameworks, and SQL.
6 |
7 | The cheatsheet is loosely based on *The Data Science Design Manual* by Steven S. Skiena and *An Introduction to Statistical Learning* by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani.
8 |
9 | Inspired by William Chen's *The Only Probability Cheatsheet You'll Ever Need*, located [here](https://github.com/wzchen/probability_cheatsheet).
10 |
11 | ## Future Additions
12 | * ~~Graph Theory~~
13 | * Algorithms and Data Structures
14 | * Python
15 | * Advanced SQL (SQL Part II)
16 | * Data Science on the Cloud (AWS/GCP/Azure)
17 | * Linear Algebra
18 | * Data Engineering
19 |
20 | ## Screenshots
21 | 
22 | 
23 |
24 |
25 | ## License
26 | This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
27 | 
28 |
29 | ## Changelog
30 | **2020-04-18** Added Graph Theory Section
31 |
32 | **2018-08-13** Added Python Data Structures Section
33 |
34 | **2018-08-12** Added Feature Engineering Section
35 |
36 | **2018-08-10** Added Data Science Cheatsheet
37 |
38 | ## Contact
39 | Feel free to send comments, suggestions, and potential improvements!
40 |
41 | **Maverick Lin**: Reach out to me via [Quora](https://www.quora.com/profile/Maverick-Lin) or through my [website](http://mavericklin.com/). Cheers.
42 |
--------------------------------------------------------------------------------
/Screenshots/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Screenshots/.DS_Store
--------------------------------------------------------------------------------
/Screenshots/screenshot1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Screenshots/screenshot1.png
--------------------------------------------------------------------------------
/Screenshots/screenshot2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/Screenshots/screenshot2.png
--------------------------------------------------------------------------------
/data-science-cheatsheet.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ml874/Data-Science-Cheatsheet/ec13ff999fddd9a1a88a285cb8e1d00649bba5d9/data-science-cheatsheet.pdf
--------------------------------------------------------------------------------
/data_science_cheatsheet.tex:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Data Science Cheatsheet
3 | %
4 | % Edited by Maverick Lin
5 | %
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | \documentclass[9pt]{extarticle}
8 | % \documentclass{article}
9 | \usepackage[landscape]{geometry}
10 | \usepackage{url}
11 | \usepackage{multicol}
12 | \usepackage{amsmath}
13 | \usepackage{amsfonts}
14 | \usepackage{tikz}
15 | \usetikzlibrary{decorations.pathmorphing}
16 | \usepackage{amsmath,amssymb}
17 |
18 | \usepackage{colortbl}
19 | \usepackage{xcolor}
20 | \usepackage{mathtools}
21 | \usepackage{amsmath,amssymb}
22 | \usepackage{enumitem}
23 | \usepackage{tabto}
24 | \usepackage{enumitem}
25 | \usepackage{graphicx}
26 | \usepackage{tabu}
27 | \usepackage{minted}
28 |
29 |
30 | \title{Data Science Cheatsheet}
31 | \usepackage[english]{babel}
32 | \usepackage[utf8]{inputenc}
33 |
34 | \advance\topmargin-.8in
35 | \advance\textheight3in
36 | \advance\textwidth3in
37 | \advance\oddsidemargin-1.5in
38 | \advance\evensidemargin-1.5in
39 | \parindent0pt
40 | \parskip2pt
41 | \newcommand{\hr}{\centerline{\rule{3.5in}{1pt}}}
42 | %\colorbox[HTML]{e4e4e4}{\makebox[\textwidth-2\fboxsep][l]{texto}
43 | \begin{document}
44 |
45 | \begin{center}{\huge{\textbf{Data Science Cheatsheet}}}\\
46 | {\large Compiled by Maverick Lin (\url{http://mavericklin.com})}\\
47 | {\normalsize Last Updated \today}
48 | \end{center}
49 | \begin{multicols*}{3}
50 |
51 | \tikzstyle{mybox} = [draw=black, fill=white, very thick, rectangle, rounded corners, inner sep=10pt, inner ysep=10pt]
52 | \tikzstyle{fancytitle} =[fill=black, text=white, font=\bfseries]
53 | %------------ What is Data Science? ---------------
54 | \begin{tikzpicture}
55 | \node [mybox] (box){%
56 | \begin{minipage}{0.3\textwidth}
57 | Multi-disciplinary field that brings together concepts from computer science, statistics/machine learning, and data analysis to understand and extract insights from the ever-increasing amounts of data.\\
58 |
59 | Two paradigms of data research.
60 | \setlist{nolistsep}
61 | \begin{enumerate}[noitemsep]
62 | \item \textbf{Hypothesis-Driven:} Given a problem, what kind of data do we need to help solve it?
63 | \item \textbf{Data-Driven:} Given some data, what interesting problems can be solved with it?\\
64 | \end{enumerate}
65 |
66 | The heart of data science is to always ask questions. Always be curious about the world.
67 | \begin{enumerate}[noitemsep]
68 | \item What can we learn from this data?
69 | \item What actions can we take once we find whatever it is we are looking for?
70 | \end{enumerate}
71 |
72 |
73 | \end{minipage}
74 | };
75 | \node[fancytitle, right=10pt] at (box.north west) {What is Data Science?};
76 | \end{tikzpicture}
77 |
78 |
79 | %------------ Types of Data ---------------
80 | \begin{tikzpicture}
81 | \node [mybox] (box){%
82 | \begin{minipage}{0.3\textwidth}
83 | \textbf{Structured:} Data that has predefined structures. e.g. tables, spreadsheets, or relational databases.\\
84 | \textbf{Unstructured Data}: Data with no predefined structure, comes in any size or form, cannot be easily stored in tables. e.g. blobs of text, images, audio\\
85 | \textbf{Quantitative Data:} Numerical. e.g. height, weight\\
86 | \textbf{Categorical Data:} Data that can be labeled or divided into groups. e.g. race, sex, hair color.\\
87 | \textbf{Big Data:} Massive datasets, or data that contains greater \textit{variety} arriving in increasing \textit{volumes} and with ever-higher \textit{velocity} (3 Vs). Cannot fit in the memory of a single machine.\\
88 |
89 | {\color{blue} \textbf{Data Sources/Formats}}\\
90 | \textbf{Most Common Data Formats} CSV, XML, SQL, JSON, Protocol Buffers\\
91 | \textbf{Data Sources} Companies/Proprietary Data, APIs, Government, Academic, Web Scraping/Crawling
92 |
93 | \end{minipage}
94 |
95 | };
96 | \node[fancytitle, right=10pt] at (box.north west) {Types of Data};
97 | \end{tikzpicture}
98 |
99 |
100 | %------------ Main Types of Problems -----------------
101 | \begin{tikzpicture}
102 | \node [mybox] (box){%
103 | \begin{minipage}{0.3\textwidth}
104 | Two problems arise repeatedly in data science.\\
105 | \textbf{Classification:} Assigning something to a discrete set of possibilities. e.g. spam or non-spam, Democrat or Republican, blood type (A, B, AB, O) \\
106 | \textbf{Regression:} Predicting a numerical value. e.g. someone's income, next year's GDP, stock price
107 | \end{minipage}
108 | };
109 | \node[fancytitle, right=10pt] at (box.north west) {Main Types of Problems};
110 | \end{tikzpicture}
111 |
112 |
113 | %------------ Probability Overview -------------------
114 | \begin{tikzpicture}
115 | \node [mybox] (box){%
116 | \begin{minipage}{0.3\textwidth}
117 | Probability theory provides a framework for reasoning about likelihood of events.\\
118 | \newline
119 | {\color{blue} \textbf{Terminology}}\\
120 | \textbf{Experiment:} procedure that yields one of a possible set of outcomes e.g. repeatedly tossing a die or coin \\
121 | \textbf{Sample Space S:} set of possible outcomes of an experiment e.g. if tossing a die, S = $\{1, 2, 3, 4, 5, 6\}$\\
122 | \textbf{Event E:} set of outcomes of an experiment e.g. event that a roll is 5, or the event that sum of 2 rolls is 7\\
123 | \textbf{Probability of an Outcome s or P(s):} number that satisfies 2 properties
124 | \setlist{nolistsep}
125 | \begin{enumerate}[noitemsep]
126 | \item for each outcome s, 0 $\leq$ P(s) $\leq$ 1
127 | \item $\sum_{s \in S} P(s) = 1$\\
128 | \end{enumerate}
129 | \textbf{Probability of Event E:} sum of the probabilities of the outcomes of the experiment: P(E) = $\sum_{s \in E}$ P(s)\\
130 | \textbf{Random Variable V:} numerical function on the outcomes of a probability space\\
131 | \textbf{Expected Value of Random Variable V:} E(V) = $\sum_{s \in S}$ P(s) $\cdot$ V(s)\\
132 |
133 | {\color{blue} \textbf{Independence, Conditional, Compound}}\\
134 | \textbf{Independent Events:} A and B are independent iff:\\
135 | \centerline{P(A $\cap $ B) = P(A)P(B)}\\
136 | \centerline{P($A|B$) = P(A)}\\
137 | \centerline{P($B|A$) = P(B)}\\
138 | \textbf{Conditional Probability:} P($A|B$) = P(A,B)/P(B)\\
139 | \textbf{Bayes Theorem:} P($A|B$) = P($B|A$)P(A)/P(B)\\
140 | \textbf{Joint Probability:} P(A,B) = P($B|A$)P(A)\\
141 | \textbf{Marginal Probability:} P(A)\\
142 |
143 | {\color{blue} \textbf{Probability Distributions}}\\
144 | \textbf{Probability Mass Function (PMF)} Gives the probability that a discrete rv takes on the value x: $p_{X}(x) = P(X=x)$. For a continuous rv, the \textbf{Probability Density Function (PDF)} plays the same role, with probabilities obtained by integrating it over an interval.\\
145 | \textbf{Cumulative Distribution Function (CDF)} Gives the probability that a random variable is less than or equal to x: $F_{X}(x) = P(X\leq x)$\\
146 | \textit{Note}: The PMF/PDF and the CDF of a given random variable contain exactly the same information.
147 |
148 | \end{minipage}
149 | };
150 | \node[fancytitle, right=10pt] at (box.north west) {Probability Overview};
151 | \end{tikzpicture}
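For concreteness, a tiny Python sketch of Bayes' Theorem using a made-up diagnostic-test example (all of the probabilities below are illustrative assumptions, not values from the cheatsheet):
\begin{minted}{python}
# Hypothetical medical-test example for Bayes' Theorem:
# P(Disease | Positive) = P(Positive | Disease) * P(Disease) / P(Positive)
p_disease = 0.01            # prior P(A), assumed
p_pos_given_disease = 0.95  # P(B|A), assumed
p_pos_given_healthy = 0.05  # false positive rate, assumed

# Total probability: P(B) = P(B|A)P(A) + P(B|not A)P(not A)
p_pos = (p_pos_given_disease * p_disease
         + p_pos_given_healthy * (1 - p_disease))

p_disease_given_pos = p_pos_given_disease * p_disease / p_pos
print(round(p_disease_given_pos, 3))  # ~0.161
\end{minted}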
152 |
153 |
154 | %------------ Descriptive Statistics ---------------------
155 | \begin{tikzpicture}
156 | \node [mybox] (box){%
157 | \begin{minipage}{0.3\textwidth}
158 | Provides a way of capturing a given data set or sample. There are two main types: \textbf{centrality} and \textbf{variability} measures.\\
159 | \newline
160 | {\color{blue} \textbf{Centrality}}\\
161 | \textbf{Arithmetic Mean} Useful to characterize symmetric distributions without outliers $\mu_{X} = \frac{1}{n} \sum x$\\
162 | \textbf{Geometric Mean} Useful for averaging ratios; always less than or equal to the arithmetic mean: $\sqrt[\leftroot{-2}\uproot{2}n]{a_{1}a_{2}...a_{n}}$\\
163 | \textbf{Median} Exact middle value among a dataset. Useful for skewed distribution or data with outliers.\\
164 | \textbf{Mode} Most frequent element in a dataset.\\
165 |
166 | {\color{blue} \textbf{Variability}}\\
167 | \textbf{Standard Deviation } Measures the squared differences between the individual elements and the mean.
168 | For a \textit{sample} from some population, this is calculated by the following:\\
169 | \centerline{$\sigma = \sqrt{\frac{\sum_{i=1}^N (x_i - \overline{x})^2}{N-1}}$}\\
170 | To calculate it for a \textit{population}, replace the denominator with $N$.\\
171 | \textbf{Variance} V = $\sigma^2$\\
172 |
173 | % {\color{blue} \textbf{Characterizing Distributions}}\\
174 | % Taken together, the mean and the standard deviation can characterize any distribution. Good practice to report both mean and standard deviation to characterize your distribution: $\mu \pm \sigma$\\
175 |
176 |
177 |
178 | {\color{blue} \textbf{Interpreting Variance}}\\
179 | Variance is an inherent part of the universe. It is impossible to obtain the same results after repeated observations of the same event due to random noise/error. Variance can be explained away by attributing to sampling or measurement errors. Other times, the variance is due to the random fluctuations of the universe.\\
180 |
181 | {\color{blue} \textbf{Correlation Analysis}}
182 |
183 | The correlation coefficient r(X,Y) is a statistic that measures the degree to which Y is a function of X and vice versa. Correlation values range from -1 to 1, where 1 means fully positively correlated, -1 means fully negatively correlated, and 0 means no correlation.\\
184 | \textbf{Pearson Coefficient} Measures the degree of the relationship between linearly related variables\\
185 | \centerline{r = $\frac{Cov(X,Y)}{\sigma(X)\sigma(Y)}$}\\
186 | \textbf{Spearman Rank Coefficient} Computed on ranks and depicts monotonic relationships\\
187 |
188 | \textit{Note:} Correlation does not imply causation!
189 | \end{minipage}
190 | };
191 | \node[fancytitle, right=10pt] at (box.north west) {Descriptive Statistics};
192 | \end{tikzpicture}
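A short NumPy/SciPy sketch of the centrality, variability, and correlation measures above; the toy arrays are made up for illustration:
\begin{minted}{python}
from collections import Counter

import numpy as np
from scipy import stats

x = np.array([2.0, 4.0, 4.0, 5.0, 7.0, 9.0, 30.0])   # toy data with one outlier
y = np.array([1.0, 3.0, 5.0, 4.0, 8.0, 9.0, 40.0])

print(np.mean(x), np.median(x))                  # arithmetic mean vs. median
print(Counter(x.tolist()).most_common(1)[0][0])  # mode (most frequent element)
print(stats.gmean(x))                            # geometric mean
print(np.std(x, ddof=1), np.std(x, ddof=0))      # sample (N-1) vs. population (N) std
print(stats.pearsonr(x, y)[0])                   # Pearson correlation
print(stats.spearmanr(x, y)[0])                  # Spearman rank correlation
\end{minted}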
193 |
194 | \newpage
195 | % ------------ Data Cleaning----------------
196 | \begin{tikzpicture}
197 | \node [mybox] (box){%
198 | \begin{minipage}{0.3\textwidth}
199 | Data Cleaning is the process of turning raw data into a clean and analyzable data set. "Garbage in, garbage out." Make sure garbage doesn't get put in.\\
200 |
201 | {\color{blue} \textbf{Errors vs. Artifacts}}
202 | \setlist{nolistsep}
203 | \begin{enumerate}[noitemsep]
204 | \item \textbf{Errors:} information that is lost during acquisition and can never be recovered e.g. power outage, crashed servers
205 | \item \textbf{Artifacts:} systematic problems that arise from the data cleaning process. These problems can be corrected, but we must first discover them\\
206 | \end{enumerate}
207 |
208 | {\color{blue} \textbf{Data Compatibility}}\\
209 | Data compatibility problems arise when merging datasets. Make sure you are comparing "apples to apples" and not "apples to oranges". Main types of conversions/unifications:
210 |
211 | \begin{itemize}[noitemsep]
212 | \item \textbf{units} (metric vs. imperial)
213 | \item \textbf{numbers} (decimals vs. integers),
214 | \item \textbf{names} (John Smith vs. Smith, John),
215 | \item \textbf{time/dates} (UNIX vs. UTC vs. GMT),
216 | \item \textbf{currency} (currency type, inflation-adjusted, dividends)\\
217 | \end{itemize}
218 |
219 | {\color{blue} \textbf{Data Imputation}}\\
220 | Process of dealing with missing values. The proper methods depend on the type of data we are working with. General methods include:
221 |
222 | \begin{itemize}[noitemsep]
223 | \item Drop all records containing missing data
224 | \item Heuristic-Based: make a reasonable guess based on knowledge of the underlying domain
225 | \item Mean Value: fill in missing data with the mean
226 | \item Random Value
227 | \item Nearest Neighbor: fill in missing data using similar data points
228 | \item Interpolation: use a method like linear regression to predict the value of the missing data\\
229 | \end{itemize}
230 |
231 | {\color{blue} \textbf{Outlier Detection}}\\
232 | Outliers can interfere with analysis and often arise from mistakes during data collection. It makes sense to run a "sanity check".\\
233 |
234 | {\color{blue} \textbf{Miscellaneous}}\\
235 | Lowercasing, removing non-alphanumeric, repairing, unidecode, removing unknown characters\\
236 |
237 | \textit{Note:} When cleaning data, always maintain both the raw data and the cleaned version(s). The raw data should be kept intact and preserved for future use. Any type of data cleaning/analysis should be done on a copy of the raw data.
238 | \end{minipage}
239 | };
240 | \node[fancytitle, right=10pt] at (box.north west) {Data Cleaning};
241 | \end{tikzpicture}
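A small pandas sketch of a few of the imputation options listed above, applied to a made-up table (the column names and values are illustrative):
\begin{minted}{python}
import numpy as np
import pandas as pd

# Toy dataset with missing values (illustrative only)
raw = pd.DataFrame({"age": [23, np.nan, 31, 40, np.nan],
                    "income": [50_000, 62_000, np.nan, 80_000, 58_000]})

clean = raw.copy()                        # always work on a copy of the raw data

dropped = raw.dropna()                    # option 1: drop records with missing data
clean["age"] = raw["age"].fillna(raw["age"].mean())   # option 2: mean imputation
clean["income"] = raw["income"].interpolate()         # option 3: interpolation
print(clean)
\end{minted}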
242 |
243 |
244 | % ------------ Feature Engineering -----------------
245 | \begin{tikzpicture}
246 | \node [mybox] (box){%
247 | \begin{minipage}{0.3\textwidth}
248 | \setlist{nolistsep}
249 |
250 | Feature engineering is the process of using domain knowledge to create features or input variables that help machine learning algorithms perform better. Done correctly, it can help increase the predictive power of your models. Feature engineering is more of an art than science. FE is one of the most important steps in creating a good model. As Andrew Ng puts it:
251 | \begin{center}
252 | \textit{“Coming up with features is difficult, time-consuming, requires expert knowledge. ‘Applied machine learning’ is basically feature engineering.”}
253 | \end{center}
254 |
255 | {\color{blue} \textbf{Continuous Data}}\\
256 | \textbf{Raw Measures}: data that hasn't been transformed yet\\
257 | \textbf{Rounding}: sometimes precision is noise; round to nearest integer, decimal etc..\\
258 | \textbf{Scaling}: log, z-score, minmax scale\\
259 | \textbf{Imputation}: fill in missing values using mean, median, model output, etc..\\
260 | \textbf{Binning}: transforming numeric features into categorical ones (or binned) e.g. values between 1-10 belong to A, between 10-20 belong to B, etc.\\
261 | \textbf{Interactions}: interactions between features: e.g. subtraction, addition, multiplication, statistical test\\
262 | \textbf{Statistical}: log/power transform (helps turn skewed distributions more normal), Box-Cox\\
263 | \textbf{Row Statistics}: number of NaN's, 0's, negative values, max, min, etc\\
264 | \textbf{Dimensionality Reduction}: using PCA, clustering, factor analysis etc\\
265 |
266 | {\color{blue} \textbf{Discrete Data}}\\
267 | \textbf{Encoding}: since some ML algorithms cannot work on categorical data, we need to turn categorical data into numerical data or vectors\\
268 | \textbf{Ordinal Values}: convert each distinct categorical value into an integer (e.g. [r,g,b] becomes [1,2,3])\\
269 | \textbf{One-Hot Encoding}: each of the m distinct values becomes a vector of length m containing a single 1 (e.g. [r, g, b] becomes [[1,0,0],[0,1,0],[0,0,1]])\\
270 | \textbf{Feature Hashing Scheme:} turns arbitrary features into indices in a vector or matrix\\
271 | \textbf{Embeddings}: if using words, convert words to vectors (word embeddings)
272 |
273 |
274 |
275 |
276 |
277 |
278 |
279 | % \includegraphics[width=\textwidth]{}
280 |
281 | \end{minipage}
282 | };
283 | \node[fancytitle, right=10pt] at (box.north west) {Feature Engineering};
284 | \end{tikzpicture}
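A brief pandas sketch of some of the continuous and discrete transformations above; the example columns, bin edges, and labels are made-up illustrations:
\begin{minted}{python}
import pandas as pd

df = pd.DataFrame({"height_cm": [150.2, 163.7, 171.4, 185.9],
                   "color": ["r", "g", "b", "g"]})

# Continuous features: rounding, z-score scaling, binning
df["height_round"] = df["height_cm"].round()
df["height_z"] = (df["height_cm"] - df["height_cm"].mean()) / df["height_cm"].std()
df["height_bin"] = pd.cut(df["height_cm"], bins=[140, 160, 180, 200],
                          labels=["short", "medium", "tall"])

# Discrete features: ordinal codes and one-hot encoding
df["color_ordinal"] = df["color"].astype("category").cat.codes
one_hot = pd.get_dummies(df["color"], prefix="color")
print(pd.concat([df, one_hot], axis=1))
\end{minted}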
285 | % ------------------------------------------------------------------------
286 |
287 |
288 |
289 | %------------ Statistical Analysis -----------------
290 | \begin{tikzpicture}
291 | \node [mybox] (box){%
292 | \begin{minipage}{0.3\textwidth}
293 | Process of statistical reasoning: there is an underlying population of possible things we can potentially observe and only a small subset of them are actually sampled (ideally at random). Probability theory describes what properties our sample should have given the properties of the population, but \textbf{\textit{statistical inference}} allows us to deduce what the full population is like after analyzing the sample.
294 | \includegraphics[width=\linewidth]{Figures/Central_Dogma_Statistics.png}
295 |
296 | {\color{blue} \textbf{Sampling From Distributions}}\\
297 | \textbf{Inverse Transform Sampling} Sampling points from a given probability distribution is sometimes necessary to run simulations or to test whether your data fits a particular distribution. The general technique is called \textit{inverse transform sampling} or Smirnov transform. First draw a random number \textit{p} between [0,1]. Compute value x such that the CDF equals \textit{p}: $F_{X}(x)$ = \textit{p}. Use x as the random value drawn from the distribution described by $F_{X}(x)$.\\
298 | \newline
299 | \textbf{Monte Carlo Sampling} In higher dimensions, correctly sampling from a given distribution becomes more tricky. Generally want to use Monte Carlo methods, which typically follow these rules: define a domain of possible inputs, generate random inputs from a probability distribution over the domain, perform a deterministic calculation, and analyze the results.
300 | \end{minipage}
301 | };
302 | \node[fancytitle, right=10pt] at (box.north west) {Statistical Analysis};
303 | \end{tikzpicture}
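A minimal NumPy sketch of inverse transform sampling and a simple Monte Carlo calculation; the exponential distribution and the $\pi$ estimate are illustrative choices rather than examples from the text:
\begin{minted}{python}
import numpy as np

rng = np.random.default_rng(0)

# Inverse transform sampling for an Exponential(lambda) distribution
# (chosen because its CDF F(x) = 1 - exp(-lambda * x) inverts in closed form).
lam = 2.0
p = rng.uniform(0.0, 1.0, size=100_000)   # step 1: draw p ~ Uniform[0, 1]
x = -np.log(1.0 - p) / lam                # step 2: solve F(x) = p for x
print(x.mean())                           # should be close to 1/lambda = 0.5

# Monte Carlo estimate of pi: sample points in the unit square and
# measure the fraction that falls inside the quarter circle.
pts = rng.uniform(0.0, 1.0, size=(100_000, 2))
inside = (pts ** 2).sum(axis=1) <= 1.0
print(4.0 * inside.mean())                # should be close to 3.14159
\end{minted}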
304 |
305 |
306 | %------------ Statistical Distributions -----------------
307 | \begin{tikzpicture}
308 | \node [mybox] (box){%
309 | \begin{minipage}{0.3\textwidth}
310 | \textbf{Binomial Distribution} (Discrete)\\
311 | Assume X is distributed Bin(n,p). X is the number of "successes" that we will achieve in n independent trials, where each trial is either a success or failure and each success occurs with the same probability p and each failure occurs with probability q=1-p.\\
312 | PMF: $P(X=x) = \binom{n}{x} p^x (1-p)^{n-x}$\\
313 | EV: $\mu = np$ \space\space Variance = npq
314 | \newline
315 |
316 | \textbf{Normal/Gaussian Distribution} (Continuous)\\
317 | Assume X is distributed $\mathcal{N}(\mu,\,\sigma^{2})$. It is a bell-shaped and symmetric distribution. Bulk of the values lie close to the mean and no value is too extreme. Generalization of the binomial distribution as n $\to \infty$.\\
318 | PDF: $P(x) = \frac{1}{{\sigma \sqrt {2\pi } }}e^{{{ - \left( {x - \mu } \right)^2 } \mathord{\left/ {\vphantom {{ - \left( {x - \mu } \right)^2 } {2\sigma ^2 }}} \right. \kern-\nulldelimiterspace} {2\sigma ^2 }}}$\\
319 | EV: $\mu$ \space\space Variance: $\sigma^2$\\
320 | \textbf{Implications}: 68\%-95\%-99.7\% rule. 68\% of the probability mass falls within 1$\sigma$ of the mean, 95\% within 2$\sigma$, and 99.7\% within 3$\sigma$.\\
321 |
322 | \textbf{Poisson Distribution} (Discrete)\\
323 | Assume X is distributed Pois($\lambda$). Poisson expresses the probability of a given number of events occurring in a fixed interval of time/space if these events occur independently and with a known constant rate $\lambda$.\\
324 | PMF: $P\left( x \right) = \frac{{e^{ - \lambda } \lambda ^x }}{{x!}}$ \space\space EV: $\lambda$ \space\space Variance = $\lambda$\\
325 | \newline
326 | \textbf{Power Law Distributions} (Continuous)\\
327 | Many data distributions have much longer tails than the normal or Poisson distributions. In other words, the change in one quantity varies as a \textit{power} of another quantity. It helps measure the inequality in the world. e.g. wealth, word frequency and Pareto Principle (80/20 Rule) \\
328 | PDF: P(X=x) = c$x^{-\alpha}$, where $\alpha$ is the law's exponent and c is the normalizing constant
329 | \begin{center}
330 | \includegraphics[width=5cm, height=3cm]{Figures/power-law.png}
331 | \end{center}
332 | \end{minipage}
333 | };
334 | \node[fancytitle, right=10pt] at (box.north west) {Classic Statistical Distributions};
335 | \end{tikzpicture}
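These distributions can be checked numerically with \texttt{scipy.stats}; a quick sketch with arbitrary parameter values:
\begin{minted}{python}
from scipy import stats

# Binomial(n=10, p=0.3): P(X = 4), mean np, variance npq
print(stats.binom.pmf(4, n=10, p=0.3), 10 * 0.3, 10 * 0.3 * 0.7)

# Normal(mu=0, sigma=1): density at 0, and the 68-95-99.7 rule for 1 sigma
print(stats.norm.pdf(0.0, loc=0.0, scale=1.0))
print(stats.norm.cdf(1.0) - stats.norm.cdf(-1.0))   # ~0.683

# Poisson(lambda=3): P(X = 2)
print(stats.poisson.pmf(2, mu=3.0))
\end{minted}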
336 |
337 |
338 | % ------------ Modeling Overview -----------------
339 | \begin{tikzpicture}
340 | \node [mybox] (box){%
341 | \begin{minipage}{0.3\textwidth}
342 | Modeling is the process of incorporating information into a tool which can forecast and make predictions. Usually, we are dealing with statistical modeling where we want to analyze relationships between variables. Formally, we want to estimate a function $f(X)$ such that:
343 | \begin{center}
344 | $Y = f(X) + \epsilon$
345 | \end{center}
346 | where X = ($X_{1}, X_{2},...X_{p}$) represents the input variables, Y represents the output variable, and $\epsilon$ represents random error.\\
347 |
348 | \textit{\textbf{Statistical learning}} is set of approaches for estimating this $f(X)$. \\
349 |
350 | {\color{blue} \textbf{Why Estimate f(X)?}}\\
351 | \textbf{Prediction}: once we have a good estimate $\hat{f}(X)$, we can use it to make predictions on new data. We treat $\hat{f}$ as a black box, since we only care about the accuracy of the predictions, not why or how it works. \\
352 | \textbf{Inference}: we want to understand the relationship between X and Y. We can no longer treat $\hat{f}$ as a black box since we want to understand how Y changes with respect to X = ($X_{1}, X_{2},...X_{p}$)\\
353 |
354 | \setlist{nolistsep}
355 | {\color{blue} \textbf{More About $\epsilon$}}\\
356 | The error term $\epsilon$ is composed of the reducible and irreducible error, which will prevent us from ever obtaining a perfect $\hat{f}$ estimate.
357 | \begin{itemize}[noitemsep]
358 | \item \textbf{Reducible}: error that can potentially be reduced by using the most appropriate statistical learning technique to estimate $f$. The goal is to minimize the reducible error.
359 | \item \textbf{Irreducible}: error that cannot be reduced no matter how well we estimate $f$. Irreducible error is unknown and unmeasurable and places an upper bound on the accuracy of our predictions.\\
360 | \end{itemize}
361 |
362 | \textit{Note}: There will always be trade-offs between model flexibility (prediction) and model interpretability (inference). This is just another case of the bias-variance trade-off. Typically, as flexibility increases, interpretability decreases. Much of statistical learning/modeling is finding a way to balance the two.
363 | \end{minipage}
364 | };
365 | \node[fancytitle, right=10pt] at (box.north west) {Modeling- Overview};
366 | \end{tikzpicture}
367 |
368 |
369 | %------------ Modeling- Philosophies -----------------
370 | \begin{tikzpicture}
371 | \node [mybox] (box){%
372 | \begin{minipage}{0.3\textwidth}
373 | Modeling is the process of incorporating information into a tool which can forecast and make predictions. Designing and validating models is important, as well as evaluating the performance of models. Note that the best forecasting model may not be the most accurate one.\\
374 |
375 | {\color{blue} \textbf{Philosophies of Modeling}}\\
376 | \textbf{Occam's Razor} Philosophical principle that the simplest explanation is the best explanation. In modeling, if we are given two models that predict equally well, we should choose the simpler one. Choosing the more complex one can often result in overfitting.\\
377 | \textbf{Bias Variance Trade-Off} Inherent part of predictive modeling, where models with lower bias will have higher variance and vice versa. Goal is to achieve low bias and low variance.
378 |
379 | \setlist{nolistsep}
380 | \begin{itemize}[noitemsep]
381 | \item \textbf{\textit{Bias}}: error from incorrect assumptions to make target function easier to learn (high bias $\to$ missing relevant relations or underfitting)
382 | \item \textbf{\textit{Variance}}: error from sensitivity to fluctuations in the dataset, or how much the target estimate would differ if different training data was used (high variance $\to$ modeling noise or overfitting)
383 | \end{itemize}
384 |
385 | \begin{center}
386 | \includegraphics[width=5cm, height=3cm]{Figures/biasvariance.png}
387 | \end{center}
388 |
389 | \textbf{No Free Lunch Theorem} No single machine learning algorithm is better than all the others on all problems. It is common to try multiple models and find one that works best for a particular problem.\\
390 |
391 |
392 | {\color{blue} \textbf{Thinking Like Nate Silver}}\\
393 | \textbf{1. Think Probabilistically} Probabilistic forecasts are more meaningful than concrete statements and should be reported as probability distributions (including $\sigma$ along with the mean prediction $\mu$).\\
394 | \textbf{2. Incorporate New Information} Use live models, which continually updates using new information. To update, use Bayesian reasoning to calculate how probabilities change in response to new evidence.\\
395 | \textbf{3. Look For Consensus Forecast} Use multiple distinct sources of evidence. Some models operate this way, such as boosting and bagging, which use a large number of weak learners to produce a strong one.
396 | \end{minipage}
397 | };
398 | \node[fancytitle, right=10pt] at (box.north west) {Modeling- Philosophies};
399 | \end{tikzpicture}
400 |
401 |
402 | %------------ Modeling- Taxonomy -----------------
403 | \begin{tikzpicture}
404 | \node [mybox] (box){%
405 | \begin{minipage}{0.3\textwidth}
406 | There are many different types of models. It is important to understand the trade-offs and when to use a certain type of model.\\
407 |
408 | {\color{blue} \textbf{Parametric vs. Nonparametric}}
409 | \setlist{nolistsep}
410 | \begin{itemize}[noitemsep]
411 | \item \textbf{Parametric}: models that first make an assumption about the functional form, or shape, of $f$ (e.g. linear), then fit the model. This reduces estimating $f$ to just estimating a set of parameters, but if our assumption is wrong, it will lead to bad results.
412 | \item \textbf{Non-Parametric}: models that don't make any assumptions about $f$, which allows them to fit a wider range of shapes; but may lead to overfitting
413 | \end{itemize}
414 |
415 | {\color{blue} \textbf{Supervised vs. Unsupervised}}
416 | \setlist{nolistsep}
417 | \begin{itemize}[noitemsep]
418 | \item \textbf{Supervised}: models that fit input variables $x_{i} = (x_{1}, x_{2},...x_{n}$) to a known output variables $y_{i} = (y_{1}, y_{2},...y_{n}$)
419 | \item \textbf{Unsupervised}: models that take in input variables $x_{i} = (x_{1}, x_{2},...x_{n}$), but they do not have an associated output to supervise the training. The goal is to understand relationships between the variables or observations.
420 | \end{itemize}
421 |
422 | {\color{blue} \textbf{Blackbox vs. Descriptive}}
423 | \setlist{nolistsep}
424 | \begin{itemize}[noitemsep]
425 | \item \textbf{Blackbox}: models that make decisions, but we do not know what happens "under the hood" e.g. deep learning, neural networks
426 | \item \textbf{Descriptive}: models that provide insight into \textit{why} they make their decisions e.g. linear regression, decision trees
427 | \end{itemize}
428 |
429 | {\color{blue} \textbf{First-Principle vs. Data-Driven}}
430 | \setlist{nolistsep}
431 | \begin{itemize}[noitemsep]
432 | \item \textbf{First-Principle}: models based on a prior belief of how the system under investigation works, incorporates domain knowledge (ad-hoc)
433 | \item \textbf{Data-Driven}: models based on observed correlations between input and output variables
434 | \end{itemize}
435 |
436 | {\color{blue} \textbf{Deterministic vs. Stochastic}}
437 | \setlist{nolistsep}
438 | \begin{itemize}[noitemsep]
439 | \item \textbf{Deterministic}: models that produce a single "prediction" e.g. yes or no, true or false
440 | \item \textbf{Stochastic}: models that produce probability distributions over possible events
441 | \end{itemize}
442 |
443 | {\color{blue} \textbf{Flat vs. Hierarchical}}
444 | \setlist{nolistsep}
445 | \begin{itemize}[noitemsep]
446 | \item \textbf{Flat}: models that solve problems on a single level, no notion of subproblems
447 | \item \textbf{Hierarchical}: models that solve several different nested subproblems
448 | \end{itemize}
449 | \end{minipage}
450 |
451 | };
452 | \node[fancytitle, right=10pt] at (box.north west) {Modeling- Taxonomy};
453 | \end{tikzpicture}
454 |
455 | %------------ Modeling- Evaluation -----------------
456 | \begin{tikzpicture}
457 | \node [mybox] (box){%
458 | \begin{minipage}{0.3\textwidth}
459 | Need to determine how good our model is. Best way to assess models is out-of-sample predictions (data points your model has never seen).\\
460 |
461 | {\color{blue} \textbf{Classification}}
462 |
463 | \begin{center}
464 | \footnotesize
465 | \begin{tabular}{ |c|c|c| }
466 | \hline
467 | & Predicted Yes & Predicted No \\
468 | \hline
469 | Actual Yes & True Positives (TP) & False Negatives (FN) \\
470 | Actual No & False Positives (FP) & True Negatives (TN) \\
471 | \hline
472 | \end{tabular}
473 | \end{center}
474 | \textbf{Accuracy}: ratio of correct predictions over total predictions. Misleading when class sizes are substantially different. $accuracy = \frac{TP + TN}{TP + TN + FN + FP}$\\
475 | \textbf{Precision}: how often the classifier is correct when it predicts positive: $precision = \frac{TP}{TP + FP}$ \\
476 | \textbf{Recall}: how often the classifier is correct for all positive instances: $recall = \frac{TP}{TP + FN}$ \\
477 | \textbf{F-Score}: single measurement to describe performance: F = 2 $\cdot$ $\frac{\text{precision} \cdot \text{recall}}{\text{precision + recall}}$ \\
478 | \textbf{ROC Curves}: plots true positive rates and false positive rates for various thresholds, or where the model determines if a data point is positive or negative (e.g. if $>$0.8, classify as positive). Best possible area under the ROC curve (AUC) is 1, while random is 0.5, or the main diagonal line.\\
479 | % \begin{center}
480 | % \includegraphics[width=5cm, height=3cm]{ROC.png}
481 | % \end{center}
482 |
483 | {\color{blue} \textbf{Regression}}\\
484 | Errors are defined as the difference between a prediction y$\prime$ and the actual result y.\\
485 | \textbf{Absolute Error}: $\Delta = y\prime - y$\\
486 | \textbf{Squared Error}: $\Delta^2 = (y\prime - y)^2$\\
487 | \textbf{Mean-Squared Error}: $MSE = \frac{1}{n}\sum_{i=1}^{n}(y\prime_{i} - y_i)^2$\\
488 | \textbf{Root Mean-Squared Error}: RMSE = $\sqrt{MSE}$\\
489 | % \textit{Note}: For squared errors, outliers have a disproportionate effect. \\
490 | \textbf{Absolute Error Distribution:} Plot absolute error distribution: should be symmetric, centered around 0, bell-shaped, and contain rare extreme outliers.
491 | \end{minipage}
492 | };
493 | \node[fancytitle, right=10pt] at (box.north west) {Modeling- Evaluation Metrics};
494 | \end{tikzpicture}
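A small NumPy sketch that computes the classification metrics above directly from the confusion-matrix counts, plus MSE/RMSE for regression; the label arrays are made up:
\begin{minted}{python}
import numpy as np

y_true = np.array([1, 1, 1, 0, 0, 0, 1, 0, 1, 0])   # made-up labels
y_pred = np.array([1, 0, 1, 0, 0, 1, 1, 0, 1, 0])   # made-up predictions

tp = int(np.sum((y_pred == 1) & (y_true == 1)))
tn = int(np.sum((y_pred == 0) & (y_true == 0)))
fp = int(np.sum((y_pred == 1) & (y_true == 0)))
fn = int(np.sum((y_pred == 0) & (y_true == 1)))

accuracy = (tp + tn) / (tp + tn + fp + fn)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f_score = 2 * precision * recall / (precision + recall)
print(accuracy, precision, recall, f_score)

# Regression errors on a separate made-up example
y = np.array([3.1, 2.4, 5.0])
y_hat = np.array([2.9, 2.8, 4.5])
mse = np.mean((y_hat - y) ** 2)
print(mse, np.sqrt(mse))                             # MSE and RMSE
\end{minted}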
495 |
496 |
497 | % ------------ Modeling- Evaluation Environment-----------------
498 | \begin{tikzpicture}
499 | \node [mybox] (box){%
500 | \begin{minipage}{0.3\textwidth}
501 | Evaluation metrics provide us with the tools to estimate errors, but what should be the process to obtain the best estimate? Resampling involves repeatedly drawing samples from a training set and refitting a model to each sample, which provides us with additional information compared to fitting the model once, such as obtaining a better estimate for the test error.\\
502 |
503 | {\color{blue} \textbf{Key Concepts}}\\
504 | \textbf{Training Data}: data used to fit your models or the set used for learning\\
505 | \textbf{Validation Data}: data used to tune the parameters of a model\\
506 | \textbf{Test Data}: data used to evaluate how good your model is. Ideally your model should never touch this data until final testing/evaluation\\
507 |
508 | {\color{blue} \textbf{Cross Validation}}\\
509 | Class of methods that estimate test error by holding out a subset of training data from the fitting process.\\
510 | \textbf{Validation Set}: split data into training set and validation set. Train model on training and estimate test error using validation. e.g. 80-20 split\\
511 | \textbf{Leave-One-Out CV (LOOCV)}: split data into training set and validation set, but the validation set consists of a single observation. Repeat n times, so that each observation has been used once as the validation set. The test error is the average of these n test error estimates.\\
512 | \textbf{k-Fold CV}: randomly divide data into k groups (folds) of approximately equal size. First fold is used as validation and the rest as training. Then repeat k times and find average of the k estimates.\\
513 |
514 | {\color{blue} \textbf{Bootstrapping}}\\
515 | Methods that rely on random sampling with replacement. Bootstrapping helps with quantifying uncertainty associated with a given estimate or model.\\
516 |
517 | {\color{blue} \textbf{Amplifying Small Data Sets}}\\
518 | What can we do if we don't have enough data?
519 | \setlist{nolistsep}
520 |
521 | \begin{itemize}[noitemsep]
522 | \item \textbf{Create Negative Examples}- e.g. when classifying presidential candidates, most people would be unqualified, so random people can be labeled as negative (unqualified) examples
523 | \item \textbf{Synthetic Data}- create additional data by adding noise to the real data
524 | \end{itemize}
525 | \end{minipage}
526 | };
527 | \node[fancytitle, right=10pt] at (box.north west) {Modeling- Evaluation Environment};
528 | \end{tikzpicture}
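A short scikit-learn sketch of k-fold cross validation on synthetic data; the linear model, the fold count, and the scoring choice are illustrative assumptions:
\begin{minted}{python}
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))                        # synthetic features
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.3, size=200)

# 5-fold cross validation: each fold takes a turn as the validation set
cv = KFold(n_splits=5, shuffle=True, random_state=0)
scores = cross_val_score(LinearRegression(), X, y,
                         cv=cv, scoring="neg_mean_squared_error")
print(-scores.mean())    # average held-out MSE across the 5 folds
\end{minted}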
529 |
530 |
531 | % ------------ Machine Learning- Linear Regression -----------------
532 | \begin{tikzpicture}
533 | \node [mybox] (box){%
534 | \begin{minipage}{0.3\textwidth}
535 | Linear regression is a simple and useful tool for predicting a quantitative response. The relationship between input variables X = ($X_{1}, X_{2},...X_{p}$) and output variable Y takes the form:
536 |
537 | \begin{center}
538 | $Y \approx \beta_{0} + \beta_{1}X_{1} + ... + \beta_{p}X_{p} + \epsilon$
539 | \end{center}
540 |
541 | $\beta_{0}...\beta_{p}$ are the unknown coefficients (parameters) which we are trying to determine. The best coefficients will lead us to the best "fit", which can be found by minimizing the \textit{residual sum of squares} (RSS), or the sum of the squared differences between the actual $i$th value and the predicted $i$th value. RSS = $\sum_{i=1}^{n} e_{i}^2$, where $e_{i} = y_{i} - \hat{y_{i}}$\\
542 |
543 | % \begin{center}
544 |
545 | % \end{center}
546 |
547 | {\color{blue} \textbf{How to find best fit?}}\\
548 | \textbf{Matrix Form}: We can solve the closed-form equation for coefficient vector $w$: $w = (X^{T}X)^{-1}X^{T}Y$. X represents the input data and Y represents the output data. This method is used for smaller matrices, since inverting a matrix is computationally expensive.\\
549 | \textbf{Gradient Descent}: First-order optimization algorithm. We can find the minimum of a \textit{convex} function by starting at an arbitrary point and repeatedly taking steps in the downward direction, which can be found by taking the negative direction of the gradient. After several iterations, we will eventually converge to the minimum. In our case, the minimum corresponds to the coefficients with the minimum error, or the best line of fit. The learning rate $\alpha$ determines the size of the steps we take in the downward direction.\\
550 |
551 | Gradient descent algorithm in two dimensions. Repeat until convergence.
552 | \setlist{nolistsep}
553 |
554 | \begin{enumerate}
555 | \item $w_0^{t+1} := w_0^{t} - \alpha \frac{\partial}{\partial w_0}J(w_0, w_1)$
556 | \item $w_1^{t+1} := w_1^{t} - \alpha \frac{\partial}{\partial w_1}J(w_0, w_1)$\\
557 | \end{enumerate}
558 |
559 | For non-convex functions, gradient descent no longer guarantees an optimal solution since there may be local minima. Instead, we should run the algorithm from different starting points and use the best local minimum we find as the solution.\\
560 | \textbf{Stochastic Gradient Descent}: instead of taking a step after sampling the \textit{entire} training set, we take a small batch of training data at random to determine our next step. Computationally more efficient and may lead to faster convergence.
561 | \end{minipage}
562 | };
563 | \node[fancytitle, right=10pt] at (box.north west) {Linear Regression};
564 | \end{tikzpicture}
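A compact NumPy sketch contrasting the closed-form (matrix) solution with the gradient descent loop described above, on synthetic data; the true coefficients, learning rate, and iteration count are illustrative:
\begin{minted}{python}
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, size=100)
y = 3.0 + 2.0 * x + rng.normal(scale=1.0, size=100)   # true line: w0=3, w1=2

# Closed form (matrix) solution: w = (X^T X)^{-1} X^T y
X = np.column_stack([np.ones_like(x), x])
w_closed = np.linalg.solve(X.T @ X, X.T @ y)

# Gradient descent on J(w0, w1) = mean squared error
w = np.zeros(2)
alpha = 0.01                        # learning rate (illustrative choice)
for _ in range(5000):
    residual = X @ w - y
    grad = 2 * X.T @ residual / len(y)
    w -= alpha * grad

print(w_closed, w)                  # both should be close to [3, 2]
\end{minted}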
565 |
566 |
567 | % ------------ Linear Regression II -----------------
568 | \begin{tikzpicture}
569 | \node [mybox] (box){%
570 | \begin{minipage}{0.3\textwidth}
571 |
572 | {\color{blue} \textbf{Improving Linear Regression}}\\
573 | \textbf{Subset/Feature Selection}: approach involves identifying a subset of the $p$ predictors that we believe to be best related to the response. Then we fit model using the reduced set of variables.
574 | \setlist{nolistsep}
575 | \begin{itemize}
576 | \item Best, Forward, and Backward Subset Selection
577 | \end{itemize}
578 |
579 | \textbf{Shrinkage/Regularization}: all variables are used, but estimated coefficients are shrunken towards zero relative to the least squares estimate. $\lambda$ represents the tuning parameter- as $\lambda$ increases, flexibility decreases $\to$ decreased variance but increased bias. The tuning parameter is key in determining the sweet spot between under and over-fitting. In addition, while Ridge will always produce a model with $p$ variables, Lasso can force coefficients to be equal to zero.
580 | \begin{itemize}
581 | \item Lasso (L1): min RSS + $\lambda \sum_{j=1}^{p}|\beta_j|$
582 |
583 | \item Ridge (L2): min RSS + $\lambda \sum_{j=1}^{p}\beta_j^{2}$
584 | \end{itemize}
585 | \textbf{Dimension Reduction}: projecting $p$ predictors into a M-dimensional subspace, where M $<$ $p$. This is achieved by computing M different linear combinations of the variables. Can use PCA.\\
586 | \textbf{Miscellaneous}: Removing outliers, feature scaling, removing multicollinearity (correlated variables)\\
587 |
588 | {\color{blue} \textbf{Evaluating Model Accuracy}}\\
589 | Residual Standard Error (RSE): RSE = $\sqrt{\frac{1}{n-2}RSS}$. Generally, the smaller the better.\\
590 | $R^2$: Measure of fit that represents the proportion of variance explained, or the \textit{variability in Y that can be explained using X}. It takes on a value between 0 and 1. Generally the higher the better. $R^2 = 1 - \frac{RSS}{TSS}$, where Total Sum of Squares (TSS) = $\sum{(y_{i}-\bar{y})}^2$\\
591 |
592 | {\color{blue} \textbf{Evaluating Coefficient Estimates}}\\
593 | Standard Error (SE) of the coefficients can be used to perform hypothesis tests on the coefficients:\\
594 | $H_0$: No relationship between X and Y, $H_a$: Some relationship exists. A p-value can be obtained and can be interpreted as follows: a small p-value indicates that a relationship between the predictor (X) and the response (Y) exists. Typical p-value cutoffs are around 5 or 1 $\%$.
595 |
596 | \end{minipage}
597 | };
598 | \node[fancytitle, right=10pt] at (box.north west) {Linear Regression II};
599 | \end{tikzpicture}
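A quick scikit-learn sketch of the shrinkage methods above on synthetic data; the \texttt{alpha} values play the role of the tuning parameter $\lambda$ and are illustrative:
\begin{minted}{python}
import numpy as np
from sklearn.linear_model import Lasso, LinearRegression, Ridge

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 10))
# Only the first two features matter; the other eight are noise.
y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + rng.normal(scale=0.5, size=100)

ols = LinearRegression().fit(X, y)
ridge = Ridge(alpha=1.0).fit(X, y)      # L2: shrinks coefficients toward zero
lasso = Lasso(alpha=0.1).fit(X, y)      # L1: can set coefficients exactly to zero

print(np.round(ols.coef_, 2))
print(np.round(ridge.coef_, 2))
print(np.round(lasso.coef_, 2))         # many irrelevant coefficients become 0
\end{minted}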
600 |
601 |
602 | % ------------ Logistic Regression -----------------
603 | \begin{tikzpicture}
604 | \node [mybox] (box){%
605 | \begin{minipage}{0.3\textwidth}
606 |
607 | Logistic regression is used for classification, where the response variable is categorical rather than numerical.\\
608 |
609 | The model works by predicting the probability that Y belongs to a particular category by first fitting the data to a linear regression model, which is then passed to the logistic function (below). The logistic function will always produce an S-shaped curve, so regardless of X, we can always obtain a sensible answer (between 0 and 1). If the probability is above a certain predetermined threshold (e.g. P(Yes) $>$ 0.5), then the model will predict Yes.
610 | \begin{center}
611 | $p(X) = \frac{e^{\beta_{0}+\beta_{1}X_{1} + ... + \beta_{p}X_{p}}}{1+e^{\beta_{0}+\beta_{1}X_{1} + ... + \beta_{p}X_{p}}}$
612 | \end{center}
613 |
614 | {\color{blue} \textbf{How to find best coefficients?}}\\
615 | \textbf{Maximum Likelihood}: The coefficients $\beta_0...\beta_{p}$ are unknown and must be estimated from the training data. We seek estimates for $\beta_0...\beta_{p}$ such that the predicted probability $\hat{p}(x_i)$ of each observation is a number close to one if it is observed to be in a certain class and close to zero otherwise. This is done by maximizing the likelihood function:
616 | \begin{center}
617 | $l(\beta_0,\beta_1) = \displaystyle \prod_{i:y_i=1} p(x_{i}) \displaystyle \prod_{i':y_{i'}=0} (1- p(x_{i'}))$
618 | \end{center}
619 |
620 |
621 | {\color{blue} \textbf{Potential Issues}}\\
622 | \textbf{Imbalanced Classes}: an imbalance between classes in the training data leads to poor classifiers. It can result in many false positives and leaves few training examples for the minority class. Solutions include forcing balanced data by removing observations from the larger class, replicating data from the smaller class, or weighting the training examples from the smaller class more heavily.\\
623 | \textbf{Multi-Class Classification}: the more classes you try to predict, the harder it will be for the classifier to be effective. It is possible with logistic regression, but another approach, such as Linear Discriminant Analysis (LDA), may prove better.
624 |
625 |
626 | \setlist{nolistsep}
627 | \end{minipage}
628 | };
629 | \node[fancytitle, right=10pt] at (box.north west) {Logistic Regression};
630 | \end{tikzpicture}
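A brief scikit-learn sketch of fitting a logistic model and applying a 0.5 threshold, on synthetic data (the labels and the threshold are illustrative):
\begin{minted}{python}
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
y = (X[:, 0] + X[:, 1] > 0).astype(int)           # synthetic binary labels

clf = LogisticRegression().fit(X, y)               # coefficients fit by maximum likelihood
proba = clf.predict_proba([[1.0, 1.0]])[0, 1]      # p(X), the S-shaped logistic output
pred = int(proba > 0.5)                            # apply the 0.5 threshold
print(round(proba, 3), pred)

# The logistic function itself, applied to the fitted linear combination:
z = clf.intercept_[0] + clf.coef_[0] @ np.array([1.0, 1.0])
print(round(1.0 / (1.0 + np.exp(-z)), 3))          # matches predict_proba
\end{minted}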
631 |
632 |
633 | % ------------ Distance/Network Methods -----------------
634 | \begin{tikzpicture}
635 | \node [mybox] (box){%
636 | \begin{minipage}{0.3\textwidth}
637 | \setlist{nolistsep}
638 |
639 |
640 | Interpreting examples as points in space provides a way to find natural groupings or clusters among data e.g. which stars are the closest to our sun? Networks can also be built from point sets (vertices) by connecting related points.\\
641 |
642 | {\color{blue} \textbf{Measuring Distances/Similarity Measure}}\\
643 | There are several ways of measuring distances between points \textit{a} and \textit{b} in \textit{d} dimensions, with closer distances implying greater similarity.\\
644 |
645 | {\color{cyan} Minkowski Distance Metric}: $d_{k}(a,b) = \sqrt[k]{\sum_{i=1}^{d}{|a_{i}-b_{i}|^k}}$\\
646 | The parameter k provides a way to trade off between the largest and the total dimensional difference. In other words, larger values of k place more emphasis on large differences between feature values than smaller values. Selecting the right k can significantly impact the meaningfulness of your distance function. The most popular values are 1 and 2.
647 | \begin{itemize}
648 | \item Manhattan (k=1): city block distance, or the sum of the absolute difference between two points
649 | \item Euclidean (k=2): straight line distance
650 | \end{itemize}
651 | \begin{center}
652 | \includegraphics[width=4cm]{Figures/manhattan_euclidean.png}\\
653 | \end{center}
654 |
655 | {\color{cyan} Weighted Minkowski}: $d_{k}(a,b) = \sqrt[k]{\sum_{i=1}^{d}{w_{i}|a_{i}-b_{i}|^k}}$, in some scenarios, not all dimensions are equal. Can convey this idea using $w_{i}$. Generally not a good idea- should normalize data by Z-scores before computing distances.\\
656 |
657 | {\color{cyan} Cosine Similarity}: $cos(a,b) = \frac{a \cdot b}{|a||b|}$, calculates the similarity between 2 non-zero vectors, where $a \cdot b$ is the dot product; values range from -1 to 1, and higher values imply more similar vectors\\
658 |
659 | {\color{cyan} Kullback-Leibler Divergence}: $KL(A||B)=\sum_{i=1}^{d}{a_{i}\log_{2}{\frac{a_{i}}{b_{i}}}}$\\
660 | KL divergence measures the distances between probability distributions by measuring the uncertainty gained or uncertainty lost when replacing distribution A with distribution B. However, this is not a metric but forms the basis for the Jensen-Shannon Divergence Metric.\\
661 | {\color{cyan} Jensen-Shannon}: $JS(A,B) = \frac{1}{2}KL(A||M)+ \frac{1}{2}KL(B||M)$, where M is the average of A and B. The JS function is the right metric for calculating distances between probability distributions
662 | \end{minipage}
663 | };
664 | \node[fancytitle, right=10pt] at (box.north west) {Distance/Network Methods};
665 | \end{tikzpicture}
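The distance and similarity measures above take only a few lines of NumPy; a sketch using two made-up probability vectors:
\begin{minted}{python}
import numpy as np

a = np.array([0.2, 0.5, 0.3])      # toy vectors; for KL/JS treat them as distributions
b = np.array([0.1, 0.4, 0.5])

def minkowski(a, b, k):
    return np.sum(np.abs(a - b) ** k) ** (1.0 / k)

manhattan = minkowski(a, b, 1)                       # k = 1
euclidean = minkowski(a, b, 2)                       # k = 2
cosine = a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

def kl(p, q):
    return np.sum(p * np.log2(p / q))

m = (a + b) / 2.0
js = 0.5 * kl(a, m) + 0.5 * kl(b, m)                 # Jensen-Shannon divergence
print(manhattan, euclidean, cosine, js)
\end{minted}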
666 |
667 |
668 |
669 |
670 |
671 |
672 |
673 |
674 |
675 |
676 | % ------------------------------------------------------------------------
677 | % ------------ Nearest Neighbor Classification -----------------
678 | \begin{tikzpicture}
679 | \node [mybox] (box){%
680 | \begin{minipage}{0.3\textwidth}
681 | \setlist{nolistsep}
682 |
683 |
684 | Distance functions allow us to identify the points closest to a given target, or the \textit{nearest neighbors (NN)} to a given point. The advantages of NN include simplicity, interpretability and non-linearity.\\
685 |
686 | {\color{blue} \textbf{k-Nearest Neighbors}}\\
687 | Given a positive integer k and a point $x_{0}$, the KNN classifier first identifies k points in the training data most similar to $x_{0}$, then estimates the conditional probability of $x_{0}$ being in class $j$ as the fraction of the k points whose values belong to $j$. The optimal value for k can be found using cross validation.
688 | \includegraphics[width=\textwidth]{Figures/KNN.png}
689 |
690 | {\color{blue} \textbf{KNN Algorithm}}
691 | \begin{enumerate}
692 | \item Compute distance D(a,b) from point b to all points
693 | \item Select k closest points and their labels
694 | \item Output class with most frequent labels in k points
695 | \end{enumerate}
696 |
697 | {\color{blue} \textbf{Optimizing KNN}}\\
698 | Comparing a query point a in d dimensions against n training examples runs in $O(nd)$ time, which can cause lag as the number of points reaches millions or billions. Popular choices to speed up KNN include:
699 | \begin{itemize}
700 | \item \textbf{Voronoi Diagrams}: partitioning the plane into regions based on distance to points in a specific subset of the plane
701 | \item \textbf{Grid Indexes}: carve up space into d-dimensional boxes or grids and calculate the NN in the same cell as the point
702 | \item \textbf{Locality Sensitive Hashing (LSH)}: abandons the idea of finding the exact nearest neighbors. Instead, batch up nearby points to quickly find the most appropriate bucket B for our query point. LSH is defined by a hash function $h(p)$ that takes a point/vector as input and produces a number/code as output, such that it is likely that h(a) = h(b) if a and b are close to each other, and h(a) $\neq$ h(b) if they are far apart.
703 |
704 | \end{itemize}
705 |
706 |
707 |
708 |
709 |
710 | \end{minipage}
711 | };
712 | \node[fancytitle, right=10pt] at (box.north west) {Nearest Neighbor Classification};
713 | \end{tikzpicture}
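A from-scratch NumPy sketch of the brute-force $O(nd)$ KNN algorithm listed above; the synthetic data and the choice of k are illustrative:
\begin{minted}{python}
import numpy as np
from collections import Counter

def knn_predict(X_train, y_train, query, k=3):
    """Plain O(nd) k-nearest-neighbor vote using Euclidean distance."""
    dists = np.linalg.norm(X_train - query, axis=1)   # distance to every training point
    nearest = np.argsort(dists)[:k]                   # indices of the k closest points
    return Counter(y_train[nearest]).most_common(1)[0][0]

rng = np.random.default_rng(0)
X_train = rng.normal(size=(100, 2))
y_train = (X_train[:, 0] > 0).astype(int)             # synthetic labels
print(knn_predict(X_train, y_train, np.array([0.5, -0.2]), k=5))
\end{minted}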
714 | % ------------------------------------------------------------------------
715 |
716 | % ------------ Clustering -----------------
717 | \begin{tikzpicture}
718 | \node [mybox] (box){%
719 | \begin{minipage}{0.3\textwidth}
720 | \setlist{nolistsep}
721 |
722 | \textbf{Clustering} is the problem of grouping points by similarity using distance metrics, which ideally reflect the similarities you are looking for. Often items come from logical "sources" and clustering is a good way to reveal those origins. Perhaps the first thing to do with any data set. Possible applications include: hypothesis development, modeling over smaller subsets of data, data reduction, outlier detection.\\
723 |
724 |
725 | {\color{blue} \textbf{K-Means Clustering}}\\
726 | Simple and elegant algorithm to partition a dataset into K distinct, non-overlapping clusters.
727 | \begin{enumerate}
728 | \item Choose a K. Randomly assign a number between 1 and K to each observation. These serve as initial cluster assignments
729 | \item Iterate until cluster assignments stop changing
730 | \begin{enumerate}
731 | \item For each of the K clusters, compute the cluster centroid. The kth cluster centroid is the vector of the p feature means for the observations in the kth cluster.
732 | \item Assign each observation to the cluster whose centroid is closest (where closest is defined using distance metric).
733 | \end{enumerate}
734 | \end{enumerate}
735 |
736 | Since the results of the algorithm depend on the initial random assignments, it is a good idea to repeat the algorithm from different random initializations to obtain the best overall results. Can use MSE to determine which cluster assignment is better.\\
737 |
738 | {\color{blue} \textbf{Hierarchical Clustering}}\\
739 | Alternative clustering algorithm that does not require us to commit to a particular K. Another advantage is that it results in a nice visualization called a \textbf{dendrogram}. Observations that fuse at the bottom are similar, whereas those that fuse near the top are quite different; we draw conclusions based on the location along the vertical axis rather than the horizontal axis.
740 | \begin{enumerate}
741 | \item Begin with n observations and a measure of all the $\frac{n(n-1)}{2}$ pairwise dissimilarities. Treat each observation as its own cluster.
742 | \item For i = n, n-1, ...2
743 | \begin{enumerate}
744 | \item Examine all pairwise inter-cluster dissimilarities among the i clusters and identify the pair of clusters that are least dissimilar ( most similar). Fuse these two clusters. The dissimilarity between these two clusters indicates height in dendrogram where fusion should be placed.
745 | \item Compute the new pairwise inter-cluster dissimilarities among the $i-1$ remaining clusters.
746 | \end{enumerate}
747 | \end{enumerate}
748 |
749 | \textbf{Linkage}: Complete (max dissimilarity), Single (min), Average, Centroid (between centroids of cluster A and B)
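
As a hedged example, SciPy's hierarchical clustering utilities implement these linkages directly (assuming SciPy, Matplotlib for the dendrogram, and a data matrix X):
\begin{minted}{python}
from scipy.cluster.hierarchy import (
    linkage, dendrogram, fcluster)

Z = linkage(X, method="complete")  # or single/average
dendrogram(Z)                      # plot fusion heights
labels = fcluster(Z, t=3, criterion="maxclust")
\end{minted}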
750 |
751 |
752 |
753 | % \includegraphics[width=\textwidth]{}
754 |
755 | \end{minipage}
756 | };
757 | \node[fancytitle, right=10pt] at (box.north west) {Clustering};
758 | \end{tikzpicture}
759 |
760 |
761 | % ------------ Machine Learning -----------------
762 | \begin{tikzpicture}
763 | \node [mybox] (box){%
764 | \begin{minipage}{0.3\textwidth}
765 | \setlist{nolistsep}
766 |
767 |
768 | {\color{blue} \textbf{Comparing ML Algorithms}}\\
769 | \textbf{Power and Expressibility}: ML methods differ in terms of complexity. Linear regression fits linear functions, while NNs define piecewise-linear separation boundaries. More complex models can be more accurate, but at the risk of overfitting.\\
770 | \textbf{Interpretability}: some models are more transparent and understandable than others (white box vs. black box models)\\
771 | \textbf{Ease of Use}: some models feature few parameters/decisions (linear regression/NN), while others require more decision making to optimize (SVMs)\\
772 | \textbf{Training Speed}: models differ in how fast they fit the necessary parameters\\
773 | \textbf{Prediction Speed}: models differ in how fast they make predictions given a query\\
774 | \includegraphics[width=\textwidth]{Figures/Comparing_Models.png}\\
775 |
776 |
777 | {\color{blue} \textbf{Naive Bayes}}\\
778 | Naive Bayes methods are a set of supervised learning algorithms based on applying Bayes’ theorem with the "naive" assumption of independence between every pair of features.\\
779 |
780 | \textbf{Problem}: Suppose we need to classify vector X = $x_{1}...x_{n}$ into $m$ classes, $C_{1}...C_{m}$. We need to compute the probability of each possible class given X, so we can assign X the label of the class with highest probability. We can calculate a probability using the Bayes' Theorem:
781 |
782 | $$ P(C_{i} | X) = \frac{P(X | C_{i}) P(C_{i})} {P(X)} $$
783 |
784 | Where:
785 | \begin{enumerate}
786 | \item $P(C_{i})$: the prior probability of belonging to class $i$
787 | \item $P(X)$: normalizing constant, or probability of seeing the given input vector over all possible input vectors
788 | \item $P(X | C_{i})$: the conditional probability of seeing input vector X given we know the class is $C_{i}$\\
789 | \end{enumerate}
790 |
791 | The prediction model will formally look like:
792 | \begin{center}
793 | $ C(X) = \arg\max_{i \in 1..m}\ \frac{P(X | C_{i})\, P(C_{i})}{P(X)} $
794 | \end{center}
795 |
796 | where C(X) is the prediction returned for input X.
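
A minimal sketch for categorical features (hypothetical \texttt{nb\_fit}/\texttt{nb\_predict} helpers, no smoothing); the constant $P(X)$ is dropped since it does not affect the argmax:
\begin{minted}{python}
from collections import Counter, defaultdict

def nb_fit(X, y):
    class_counts = Counter(y)
    value_counts = defaultdict(Counter)
    for xs, c in zip(X, y):
        for j, v in enumerate(xs):
            value_counts[(c, j)][v] += 1
    return class_counts, value_counts

def nb_predict(x, class_counts, value_counts):
    n = sum(class_counts.values())
    scores = {}
    for c, count in class_counts.items():
        score = count / n            # prior P(C_i)
        for j, v in enumerate(x):
            # likelihood P(x_j | C_i)
            score *= value_counts[(c, j)][v] / count
        scores[c] = score
    return max(scores, key=scores.get)
\end{minted}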
797 | \end{minipage}
798 | };
799 | \node[fancytitle, right=10pt] at (box.north west) {Machine Learning Part I};
800 | \end{tikzpicture}
801 |
802 |
803 | % ------------ Machine Learning Part II -----------------
804 | \begin{tikzpicture}
805 | \node [mybox] (box){%
806 | \begin{minipage}{0.3\textwidth}
807 | \setlist{nolistsep}
808 |
809 |
810 | {\color{blue} \textbf{Decision Trees}}\\
811 | Binary branching structure used to classify an arbitrary input vector X. Each node in the tree contains a simple feature comparison against some field ($x_{i} > 42$?). Result of each comparison is either true or false, which determines if we should proceed along to the left or right child of the given node. Also known as classification and regression trees (CART).
812 | \includegraphics[width=\textwidth]{Figures/DecisionTree.png}
813 |
814 | \textbf{Advantages}: Non-linearity, support for categorical variables, easy to interpret, application to regression.\\
815 | \textbf{Disadvantages}: Prone to overfitting, unstable (not robust to noise), high variance, low bias\\
816 |
817 | \textbf{\textit{Note}}: rarely do models just use one decision tree. Instead, we aggregate many decision trees using methods like ensembling, bagging, and boosting.\\
818 |
819 | {\color{blue} \textbf{Ensembles, Bagging, Random Forests, Boosting}}\\
820 | \textbf{Ensemble learning} is the strategy of combining many different classifiers/models into one predictive model. It revolves around the idea of voting: a so-called "wisdom of crowds" approach. The most predicted class will be the final prediction.\\
821 | \textbf{Bagging}: ensemble method that works by taking B bootstrapped subsamples of the training data and constructing B trees, each tree trained on a distinct subsample; the trees' predictions are then averaged (regression) or majority-voted (classification)\\
822 | \textbf{Random Forests}: builds on bagging by decorrelating the trees. We do everything the same as in bagging, but when we build the trees, every time we consider a split, a random sample of m of the p predictors is chosen as split candidates rather than the full set (typically m $\approx$ $\sqrt{p}$). When m = p, we are just doing bagging.\\
823 | \textbf{Boosting}: the main idea is to improve our model where it is not performing well by using information from previously constructed classifiers. Trees are grown sequentially, so boosting learns slowly. Has 3 tuning parameters: number of classifiers B, learning parameter $\lambda$, and interaction depth d (controls the interaction order of the model).
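
As a hedged illustration, scikit-learn exposes these methods directly (assuming scikit-learn and training data \texttt{X\_train}, \texttt{y\_train}; the boosting arguments correspond to B, $\lambda$, and d):
\begin{minted}{python}
from sklearn.ensemble import (
    RandomForestClassifier, GradientBoostingClassifier)

# random forest: m ~ sqrt(p) predictors per split
rf = RandomForestClassifier(n_estimators=500,
                            max_features="sqrt")
rf.fit(X_train, y_train)

# boosting: B, lambda, and d map to these arguments
gb = GradientBoostingClassifier(n_estimators=500,
                                learning_rate=0.01,
                                max_depth=2)
gb.fit(X_train, y_train)
\end{minted}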
824 | \end{minipage}
825 | };
826 | \node[fancytitle, right=10pt] at (box.north west) {Machine Learning Part II};
827 | \end{tikzpicture}
828 |
829 | % ------------ Machine Learning Part III -----------------
830 | \begin{tikzpicture}
831 | \node [mybox] (box){%
832 | \begin{minipage}{0.3\textwidth}
833 | \setlist{nolistsep}
834 |
835 |
836 | {\color{blue} \textbf{Support Vector Machines}}\\
837 | Work by constructing a hyperplane that separates points between two classes. The hyperplane is determined using the maximal margin hyperplane, which is the separating hyperplane that is farthest from the closest training observations. This distance is called the margin. Points that fall on one side of the hyperplane are classified as -1 and those on the other side as +1.
838 | \includegraphics[width=\textwidth, height=3.5cm]{Figures/svm.png}
839 |
840 |
841 | {\color{blue} \textbf{Principal Component Analysis (PCA)}}\\
842 | \textbf{Principal components} allow us to summarize a set of correlated variables with a smaller set of variables that collectively explain most of the variability in the original set. Essentially, we are "dropping" the least important feature variables. \\
843 |
844 | \textbf{Principal Component Analysis} is the process by which principal components are calculated and then used to analyze and understand the data. PCA is an unsupervised approach and is used for dimensionality reduction, feature extraction, and data visualization. The resulting principal components are uncorrelated with one another. Scaling the variables before performing PCA is also important.
845 | \includegraphics[width=\textwidth]{Figures/pca.png}
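
A hedged scikit-learn sketch that scales the variables and then projects onto two components (assuming a numeric data matrix X):
\begin{minted}{python}
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

X_scaled = StandardScaler().fit_transform(X)
pca = PCA(n_components=2)
X_2d = pca.fit_transform(X_scaled)
print(pca.explained_variance_ratio_)
\end{minted}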
846 |
847 | \end{minipage}
848 | };
849 | \node[fancytitle, right=10pt] at (box.north west) {Machine Learning Part III};
850 | \end{tikzpicture}
851 |
852 |
853 | % ------------ Machine Learning Part IV -----------------
854 | \begin{tikzpicture}
855 | \node [mybox] (box){%
856 | \begin{minipage}{0.3\textwidth}
857 | \setlist{nolistsep}
858 |
859 | {\color{blue}\textbf{ML Terminology and Concepts}}\\
860 |
861 | {\color{cyan} \textbf{Features}}: input data/variables used by the ML model\\
862 | {\color{cyan}\textbf{Feature Engineering}}: transforming input features to be more useful for the models. e.g. mapping categories to buckets, normalizing between -1 and 1, removing nulls\\
863 | {\color{cyan}\textbf{Train/Eval/Test}}: training data is used to optimize the model, evaluation data is used to assess the model on new data during training, test data is used to provide the final result\\
864 | {\color{cyan}\textbf{Classification/Regression}}: regression is predicting a number (e.g. housing price), classification is predicting one of a set of categories (e.g. red/blue/green)\\
865 | {\color{cyan}\textbf{Linear Regression}}: predicts an output by multiplying and summing input features with weights and biases\\
866 | {\color{cyan}\textbf{Logistic Regression}}: similar to linear regression but predicts a probability\\
867 | {\color{cyan}\textbf{Overfitting}}: model performs great on the input data but poorly on the test data (combat by dropout, early stopping, or reduce \# of nodes or layers)\\
868 | {\color{cyan}\textbf{Bias/Variance}}: how much the output is determined by the features. More variance often means overfitting; more bias often means underfitting (a bad model) \\
869 | {\color{cyan}\textbf{Regularization}}: variety of approaches to reduce overfitting, including adding the weights to the loss function, randomly dropping layers (dropout)\\
870 | {\color{cyan}\textbf{Ensemble Learning}}: training multiple models with different parameters to solve the same problem \\
871 | {\color{cyan}\textbf{A/B testing}}: statistical way of comparing 2+ techniques to determine which technique performs better and also if difference is statistically significant \\
872 | {\color{cyan}\textbf{Baseline Model}}: simple model/heuristic used as reference point for comparing how well a model is performing \\
873 | {\color{cyan}\textbf{Bias}}: prejudice or favoritism towards some things, people, or groups over others that can affect collection/sampling and interpretation of data, the design of a system, and how users interact with a system\\
874 | {\color{cyan}\textbf{Dynamic Model}}: model that is trained online in a continuously updating fashion\\
875 | {\color{cyan}\textbf{Static Model}}: model that is trained offline\\
876 | {\color{cyan}\textbf{Normalization}}: process of converting an actual range of values into a standard range of values, typically -1 to +1\\
877 | {\color{cyan}\textbf{Independently and Identically Distributed (i.i.d)}}: data drawn from a distribution that doesn't change, and where each value drawn doesn't depend on previously drawn values; ideal but rarely found in real life\\
878 | {\color{cyan}\textbf{Hyperparameters}}: the "knobs" that you tweak during successive runs of training a model\\
879 | {\color{cyan}\textbf{Generalization}}: refers to a model's ability to make correct predictions on new, previously unseen data as opposed to the data used to train the model\\
880 | {\color{cyan}\textbf{Cross-Entropy}}: quantifies the difference between two probability distributions
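, commonly written as
$$ H(p, q) = -\sum_{x} p(x) \log q(x) $$
where $p$ is the true distribution and $q$ is the predicted one.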
881 |
882 | \end{minipage}
883 | };
884 |
885 | \node[fancytitle, right=10pt] at (box.north west) {Machine Learning Part IV};
886 | \end{tikzpicture}
887 |
888 |
889 | % ------------ Deep Learning Part I -----------------
890 | \begin{tikzpicture}
891 | \node [mybox] (box){%
892 | \begin{minipage}{0.3\textwidth}
893 | \setlist{nolistsep}
894 |
895 | {\color{blue} \textbf{What is Deep Learning?}}\\
896 | Deep learning is a subset of machine learning. One popular DL technique is based on Neural Networks (NN), which loosely mimic the human brain and whose computational units are arranged in layers. Each layer's input is the previous layer's output, which yields progressively higher-level features and defines a hierarchy. A Deep Neural Network is just a NN that has more than 1 hidden layer.
897 |
898 | \includegraphics[width=\textwidth]{Figures/DL.jpeg}
899 |
900 | Recall that statistical learning is all about approximating $f(X)$. Neural networks are known as \textbf{universal approximators}, meaning no matter how complex a function is, there exists a NN that can (approximately) do the job. We can increase the approximation (or complexity) by adding more hidden layers and neurons.\\
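
As an illustrative sketch (hypothetical \texttt{forward} helper, assuming NumPy), a one-hidden-layer network is just a composition of simple layer functions:
\begin{minted}{python}
import numpy as np

def relu(z):
    return np.maximum(0.0, z)

def forward(x, W1, b1, W2, b2):
    h = relu(W1 @ x + b1)  # hidden features
    return W2 @ h + b2     # output
\end{minted}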
901 |
902 | {\color{blue} \textbf{Popular Architectures}}\\
903 | There are different kinds of NNs that are suitable for certain problems, which depend on the NN's architecture.\\
904 |
905 | \textbf{Linear Classifier}: takes input features and combines them with weights and biases to predict output value\\
906 | \textbf{DNN}: deep neural net, contains intermediate layers of nodes that represent “hidden features” and activation functions to represent non-linearity\\
907 | \textbf{CNN}: convolutional NN, has a combination of convolutional, pooling, dense layers. popular for image classification. \\
908 | \textbf{Transfer Learning}: use existing trained models as starting points and add additional layers for the specific use case. idea is that highly trained existing models know general features that serve as a good starting point for training a small network on specific examples \\
909 | \textbf{RNN}: recurrent NN, designed for handling a sequence of inputs that have "memory" of the sequence. LSTMs are a fancy version of RNNs, popular for NLP\\
910 | \textbf{GAN}: generative adversarial network, one model creates fake examples, and another model is served both fake and real examples and is asked to distinguish between them\\
911 | \textbf{Wide and Deep}: combines linear classifiers with deep neural net classifiers, "wide" linear parts represent memorizing specific examples and “deep” parts represent understanding high level features
912 |
913 |
914 | \end{minipage}
915 | };
916 | \node[fancytitle, right=10pt] at (box.north west) {Deep Learning Part I};
917 | \end{tikzpicture}
918 |
919 |
920 | % ------------ Deep Learning Part II -----------------
921 | \begin{tikzpicture}
922 | \node [mybox] (box){%
923 | \begin{minipage}{0.3\textwidth}
924 | \setlist{nolistsep}
925 |
926 | {\color{blue} \textbf{Tensorflow}}\\
927 | Tensorflow is an open source software library for numerical computation using data flow graphs. Everything in TF is a graph, where nodes represent operations on data and edges represent the data. Phase 1 of TF is building up a computation graph and phase 2 is executing it. It is also distributed, meaning it can run on either a cluster of machines or just a single machine. \\
928 | TF is extremely popular/suitable for working with Neural Networks, since the way TF sets up the computational graph pretty much resembles a NN.\\
929 |
930 | \includegraphics[width=\textwidth]{Figures/TF_DAG.png}
931 |
932 |
933 | {\color{blue} \textbf{Tensors}}\\
934 | In a graph, tensors are the edges and are multidimensional data arrays that flow through the graph. They are the central unit of data in TF and consist of a set of primitive values shaped into an array of any number of dimensions.\\
935 | A tensor is characterized by its rank (\# dimensions in tensor), shape (\# of dimensions and size of each dimension), data type (data type of each element in tensor).\\
936 |
937 | {\color{blue} \textbf{Placeholders and Variables}}\\
938 | \textbf{Variables}: best way to represent shared, persistent state manipulated by your program. These are the parameters of the ML model, which are altered/trained during the training process (training variables). \\
939 | \textbf{Placeholders}: way to specify inputs into a graph; they hold the place for a Tensor that will be fed at runtime. They are assigned once and do not change afterwards (input nodes).
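
A minimal sketch of the two phases using the TensorFlow 1.x graph/session API described here:
\begin{minted}{python}
import tensorflow as tf  # TensorFlow 1.x API

# phase 1: build the graph
x = tf.placeholder(tf.float32, shape=[None, 3])
W = tf.Variable(tf.ones([3, 1]))
b = tf.Variable(tf.zeros([1]))
y = tf.matmul(x, W) + b

# phase 2: execute the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y, feed_dict={x: [[1., 2., 3.]]}))
\end{minted}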
940 |
941 | % In ML, feature vectors are inputs/attributes which the ML algorithm focuses on. Each data point is a list (vector) of such vectors: aka feature vector. The output is a label or number.\\
942 | % Deep Learning is a representation ML based system that figures out by themselves what features to focus on. Neural networks are the most common class of deep learning algorithms, which are composed of neurons.
943 | \end{minipage}
944 | };
945 | \node[fancytitle, right=10pt] at (box.north west) {Deep Learning Part II};
946 | \end{tikzpicture}
947 |
948 | % ------------ Deep Learning Part III -----------------
949 | \begin{tikzpicture}
950 | \node [mybox] (box){%
951 | \begin{minipage}{0.3\textwidth}
952 | \setlist{nolistsep}
953 |
954 | {\color{blue}\textbf{Deep Learning Terminology and Concepts}}\\
955 |
956 | {\color{cyan}\textbf{Neuron}}: node in a NN, typically taking in multiple input values and generating one output value, calculates the output value by applying an activation function (nonlinear transformation) to a weighted sum of input values\\
957 | {\color{cyan}\textbf{Weights}}: edges in a NN, the goal of training is to determine the optimal weight for each feature; if weight = 0, corresponding feature does not contribute\\
958 | {\color{cyan}\textbf{Neural Network}}: composed of neurons (simple building blocks that actually “learn”), contains activation functions that makes it possible to predict non-linear outputs\\
959 | {\color{cyan}\textbf{Activation Functions}}: mathematical functions that introduce non-linearity to a network e.g. RELU, tanh\\
960 | {\color{cyan}\textbf{Sigmoid Function}}: function that maps very negative numbers to a number very close to 0, huge numbers close to 1, and 0 to .5. Useful for predicting probabilities\\
961 | {\color{cyan}\textbf{Gradient Descent/Backpropagation}}: fundamental loss-optimization algorithms on which the other optimizers are usually based. Backpropagation is similar to gradient descent but for neural nets \\
962 | {\color{cyan}\textbf{Optimizer}}: operation that changes the weights and biases to reduce loss e.g. Adagrad or Adam\\
963 | {\color{cyan}\textbf{Weights / Biases}}: weights are values that the input features are multiplied by to predict an output value. Biases are the value of the output given a weight of 0. \\
964 | {\color{cyan}\textbf{Converge}}: algorithm that converges will eventually reach an optimal answer, even if very slowly. An algorithm that doesn’t converge may never reach an optimal answer. \\
965 | {\color{cyan}\textbf{Learning Rate}}: rate at which optimizers change weights and biases. High learning rate generally trains faster but risks not converging, whereas a lower rate trains slower \\
966 | {\color{cyan}\textbf{Numerical Instability}}: issues with very large/small values due to limits of floating point numbers in computers\\
967 | {\color{cyan}\textbf{Embeddings}}: mapping from discrete objects, such as words, to vectors of real numbers. useful because classifiers/neural networks work well on vectors of real numbers\\
968 | {\color{cyan}\textbf{Convolutional Layer}}: series of convolutional operations, each acting on a different slice of the input matrix\\
969 | {\color{cyan}\textbf{Dropout}}: method for regularization in training NNs, works by removing a random selection of some units in a network layer for a single gradient step\\
970 | {\color{cyan}\textbf{Early Stopping}}: method for regularization that involves ending model training early\\
971 | {\color{cyan}\textbf{Gradient Descent}}: technique to minimize loss by computing the gradients of loss with respect to the model's parameters, conditioned on training data\\
972 | {\color{cyan}\textbf{Pooling}}: Reducing a matrix (or matrices) created by an earlier convolutional layer to a smaller matrix. Pooling usually involves taking either the maximum or average value across the pooled area
973 |
974 | % \includegraphics[width=\textwidth]{}
975 |
976 | \end{minipage}
977 | };
978 | \node[fancytitle, right=10pt] at (box.north west) {Deep Learning Part III};
979 | \end{tikzpicture}
980 |
981 |
982 | %------------ Big Data- Hadoop Overview ---------------
983 | \begin{tikzpicture}
984 | \node [mybox] (box){%
985 | \begin{minipage}{0.3\textwidth}
986 | \setlist{nolistsep}
987 |
988 | Data can no longer fit in memory on one machine (monolithic), so a new way of computing was devised using a group of computers to process this "big data" (distributed). Such a group is called a cluster, which makes up server farms. All of these servers have to be coordinated in the following ways: partition data, coordinate computing tasks, handle fault tolerance/recovery, and allocate capacity to process.\\
989 |
990 | {\color{blue} \textbf{Hadoop}}\\
991 | Hadoop is an open source \textit{distributed} processing framework that manages data processing and storage for big data applications running in clustered systems. It is composed of 3 main components:
992 | \begin{itemize}
993 | \item \textbf{Hadoop Distributed File System (HDFS)}: a distributed file system that provides high-throughput access to application data by partitioning data across many machines
994 | \item \textbf{YARN}: framework for job scheduling and cluster resource management (task coordination)
995 | \item \textbf{MapReduce}: YARN-based system for parallel processing of large data sets on multiple machines\\
996 | \end{itemize}
997 |
998 | {\color{blue} \textbf{HDFS}}\\
999 | An HDFS cluster is composed of 1 master node, while the remaining machines act as workers/data nodes. The \textbf{master node} manages the overall file system by storing the directory structure and the metadata of the files. The \textbf{data nodes} physically store the data. Large files are broken up into blocks that are distributed across multiple machines, and each block is also replicated across multiple machines to provide fault tolerance.\\
1000 |
1001 | {\color{blue} \textbf{MapReduce}}\\
1002 | Parallel programming paradigm which allows for processing of huge amounts of data by running processes on multiple machines. Defining a MapReduce job requires two stages: map and reduce.
1003 |
1004 | \begin{itemize}
1005 | \item \textbf{Map}: operation to be performed in parallel on small portions of the dataset. The output is a key-value pair $\langle key,\ value \rangle$
1006 | \item \textbf{Reduce}: operation to combine the results of Map\\
1007 | \end{itemize}
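
As an illustration of the paradigm (plain Python rather than the Hadoop API), a word count can be written as a map step that emits $\langle word, 1 \rangle$ pairs and a reduce step that sums them:
\begin{minted}{python}
from itertools import groupby
from operator import itemgetter

def map_phase(line):
    return [(word, 1) for word in line.split()]

def reduce_phase(word, counts):
    return word, sum(counts)

lines = ["the quick brown fox", "the lazy dog"]
# map, then shuffle/sort by key
pairs = sorted(p for line in lines
                 for p in map_phase(line))
result = [reduce_phase(k, [v for _, v in g])
          for k, g in groupby(pairs, key=itemgetter(0))]
print(result)  # e.g. [('brown', 1), ..., ('the', 2)]
\end{minted}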
1008 |
1009 | {\color{blue} \textbf{YARN- Yet Another Resource Negotiator}}\\
1010 | Coordinates tasks running on the cluster and assigns new nodes in case of failure. Comprised of 2 subcomponents: the resource manager and the node manager. The \textbf{resource manager} runs on a single master node and schedules tasks across nodes. The \textbf{node manager} runs on all other nodes and manages tasks on the individual node.
1011 |
1012 | % A typical job process: the user defines defines the map and reduce tasks using MapReduce API, the job is triggered on the Hadoop cluster, YARN figures out where and how to run the job and stores the result in HDFS.
1013 |
1014 | \end{minipage};
1015 |
1016 | };
1017 | \node[fancytitle, right=10pt] at (box.north west) {Big Data- Hadoop Overview};
1018 | \end{tikzpicture}
1019 |
1020 |
1021 | %------------ Big Data- Hadoop Ecosystem ---------------
1022 | \begin{tikzpicture}
1023 | \node [mybox] (box){%
1024 | \begin{minipage}{0.3\textwidth}
1025 | An entire ecosystem of tools has emerged around Hadoop, all based on interacting with HDFS. Below are some popular ones:\\
1026 |
1027 | {\color{cyan} \textbf{Hive}}: data warehouse software built on top of Hadoop that facilitates reading, writing, and managing large datasets residing in distributed storage using SQL-like queries (HiveQL). Hive abstracts away the underlying MapReduce jobs and returns the data stored in HDFS in the form of tables (rather than raw files). \\
1028 | {\color{cyan} \textbf{Pig}}: high level scripting language (Pig Latin) that enables writing complex data transformations. It pulls unstructured/incomplete data from sources, cleans it, and places it in a database/data warehouse. Pig performs ETL into the data warehouse, while Hive queries the data warehouse to perform analysis (GCP: DataFlow).\\
1029 | {\color{cyan} \textbf{Spark}}: framework for writing fast, distributed programs for data processing and analysis. Spark solves similar problems as Hadoop MapReduce but with a fast in-memory approach. It is a unified engine that supports SQL queries, streaming data, machine learning, and graph processing. It can operate separately from Hadoop but integrates well with it. Data is processed using Resilient Distributed Datasets (RDDs), which are immutable, lazily evaluated, and track their lineage. \\
1030 | {\color{cyan} \textbf{Hbase}}: non-relational, NoSQL, column-oriented database management system that runs on top of HDFS. Well suited for sparse data sets (GCP: BigTable) \\
1031 | {\color{cyan} \textbf{Flink/Kafka}}: stream processing framework. Batch streaming is for bounded, finite datasets, with periodic updates, and delayed processing. Stream processing is for unbounded datasets, with continuous updates, and immediate processing. Stream data and stream processing must be decoupled via a message queue. Can group streaming data (windows) using tumbling (non-overlapping time), sliding (overlapping time), or session (session gap) windows. \\
1032 | {\color{cyan} \textbf{Beam}}: programming model to define and execute data processing pipelines, including ETL, batch and stream (continuous) processing. After building the pipeline, it is executed by one of Beam’s distributed processing back-ends (Apache Apex, Apache Flink, Apache Spark, and Google Cloud Dataflow). Modeled as a Directed Acyclic Graph (DAG).\\
1033 | {\color{cyan} \textbf{Oozie}}: workflow scheduler system to manage Hadoop jobs\\
1034 | {\color{cyan} \textbf{Sqoop}}: framework for transferring large amounts of data into HDFS from relational databases (e.g. MySQL)
1035 | \end{minipage};
1036 | };
1037 | \node[fancytitle, right=10pt] at (box.north west) {Big Data- Hadoop Ecosystem};
1038 | \end{tikzpicture}
1039 |
1040 | % ------------ Graph Theory -----------------
1041 | \begin{tikzpicture}
1042 | \node [mybox] (box){%
1043 | \begin{minipage}{0.3\textwidth}
1044 | \setlist{nolistsep}
1045 | Graph theory is the study of graphs, which are structures used to model relationships between objects. For example, we can model friendships, computers, social networks, and transportation systems all as graphs. These graphs can then be analyzed to uncover hidden patterns or connections that were previously unidentifiable or calculate statistical properties of the networks and predict how the networks will evolve over time.\\
1046 |
1047 | \noindent Formally, a graph $G = (V, E)$ consists of a set $V$ of vertices (nodes) and a set $E$ of edges (connects two nodes together). An edge represents a relationship between the nodes it connects (friendship between 2 people, connection between 2 computers, etc.). A directed graph is where the edges have a direction or order (otherwise undirected).\\
1048 |
1049 | \noindent A weighted graph is a graph where the edges show the intensity of the relationships using weights:
1050 | \begin{itemize}
1051 | \item \textbf{Binary Weight}: 0 or 1 weight, tells us if a link exists between 2 nodes
1052 | \item \textbf{Numeric Weight}: expresses how strong the connection is between a node and other nodes
1053 | \item \textbf{Normalized Weight}: variant of numeric weight where all the outgoing edges of a node sum to 1\\
1054 | \end{itemize}
1055 |
1056 | \noindent A graph can be represented:
1057 | \begin{itemize}
1058 | \item \textbf{Graphically}: a picture that displays all the nodes, edges, and weights
1059 | \item \textbf{Mathematically}: an adjacency matrix A of size ($n$, $n$) ($n$ nodes) and $a_{i, j} = 1$ if a link exists between nodes \textit{i} and \textit{j}, 0 otherwise. A weight matrix $W$ expresses the edge weights between nodes of a network. An adjacency list is an abstract representation of the adjacency matrix, and provides a list of all the connections present in the network (weight list is similar).\\
1060 | \end{itemize}
1061 |
1062 | \noindent Applications of graph theory include:
1063 | \begin{itemize}
1064 | \item \textbf{Route Optimization}: model the transportation of a commodity from one place to another
1065 | \item \textbf{Job Scheduling}: model and find the optimal scheduling of jobs or tasks
1066 | \item \textbf{Fraud Detection}: model fraud transactions and uncover rings of fraudsters working together
1067 | \item \textbf{Sociology and Economics}: model groups of people to see how they will act and evolve over time
1068 | \item \textbf{Epidemiology}: model how a disease will spread through a network and how fast it will spread\\
1069 |
1070 | \end{itemize}
1071 |
1072 | \noindent Python's NetworkX and Spark's GraphX offer graph capabilities.
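
For instance, a small weighted graph and its weight matrix in NetworkX (assuming NetworkX and NumPy are installed):
\begin{minted}{python}
import networkx as nx

G = nx.Graph()
G.add_edge("Alice", "Bob", weight=1.0)
G.add_edge("Bob", "Carol", weight=0.5)
print(list(G.neighbors("Bob")))
print(nx.to_numpy_array(G, weight="weight"))
\end{minted}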
1073 |
1074 | \end{minipage}
1075 | };
1076 | \node[fancytitle, right=10pt] at (box.north west) {Graph Theory};
1077 | \end{tikzpicture}
1078 |
1079 |
1080 | % ------------ SQL Part I -----------------
1081 | \begin{tikzpicture}
1082 | \node [mybox] (box){%
1083 | \begin{minipage}{0.3\textwidth}
1084 | Structured Query Language (SQL) is a declarative language used to access \& manipulate data in databases. Usually the database is a Relational Database Management System (RDBMS), which stores data arranged in relational database tables. A table is arranged in columns and rows, where columns represent characteristics of stored data and rows represent actual data entries.\\
1085 |
1086 | {\color{blue} \textbf{Basic Queries}}\\
1087 | - filter columns: \textbf{SELECT} col1, col3... \textbf{FROM} table1\\
1088 | - filter the rows: \textbf{WHERE} col4 = 1 \textbf{AND} col5 = 2\\
1089 | - aggregate the data: \textbf{GROUP BY}…\\
1090 | - limit aggregated data: \textbf{HAVING} count(*) $>$ 1 \\
1091 | - order of the results: \textbf{ORDER BY} col2\\
1092 |
1093 | Useful Keywords for \textbf{SELECT}\\
1094 | \textbf{DISTINCT}- return unique results\\
1095 | \textbf{BETWEEN} a AND b- limit the range, the values can be numbers, text, or dates\\
1096 | \textbf{LIKE}- pattern search within the column text\\
1097 | \textbf{IN} (a, b, c) - check if the value is contained among given\\
1098 |
1099 | {\color{blue} \textbf{Data Modification}}\\
1100 | - update specific data with the \textbf{WHERE} clause: \\ \textbf{UPDATE} table1 \textbf{SET} col1 = 1 \textbf{WHERE} col2 = 2\\
1101 | - insert values manually\\ \textbf{INSERT} \textbf{INTO} table1 (col1,col3) \textbf{VALUES} (val1,val3);\\
1102 | - by using the results of a query\\
1103 | \textbf{INSERT} \textbf{INTO} table1 (col1,col3) \textbf{SELECT} col,col2 \textbf{FROM} table2;\\
1104 |
1105 | % {\color{blue} \textbf{Views}}\\
1106 | % A \textbf{VIEW} is a virtual table, which is a result of a query. Can be used to create virtual tables of complex queries.\\
1107 | % \textbf{CREATE VIEW} view1 \textbf{AS}\\
1108 | % \textbf{SELECT} col1, col2\\
1109 | % \textbf{FROM} table1\\
1110 | % \textbf{WHERE} …\\
1111 |
1112 |
1113 | {\color{blue} \textbf{Joins}}\\
1114 | The JOIN clause is used to combine rows from two or more tables, based on a related column between them.\\
1115 | \includegraphics[width=\textwidth]{Figures/sql_joins.jpg}
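
A hedged, self-contained illustration of a join using Python's built-in \texttt{sqlite3} module (table and column names are invented for the example):
\begin{minted}{python}
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
  CREATE TABLE customers (id INTEGER, name TEXT);
  CREATE TABLE orders (id INTEGER,
    customer_id INTEGER, total REAL);
  INSERT INTO customers VALUES (1,'Ada'),(2,'Grace');
  INSERT INTO orders VALUES (10,1,9.99),(11,1,5.0);
""")
rows = conn.execute("""
  SELECT c.name, SUM(o.total)
  FROM customers c
  LEFT JOIN orders o ON o.customer_id = c.id
  GROUP BY c.name
""").fetchall()
print(rows)  # e.g. [('Ada', 14.99), ('Grace', None)]
\end{minted}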
1116 |
1117 | \setlist{nolistsep}
1118 | \end{minipage}
1119 | };
1120 | \node[fancytitle, right=10pt] at (box.north west) {SQL Part I};
1121 | \end{tikzpicture}
1122 |
1123 |
1124 |
1125 | % ------------ Python- Data Structures -----------------
1126 | \begin{tikzpicture}
1127 | \node [mybox] (box){%
1128 | \begin{minipage}{0.3\textwidth}
1129 | \setlist{nolistsep}
1130 |
1131 | Data structures are a way of storing and manipulating data, and each data structure has its own strengths and weaknesses. Combined with algorithms, data structures allow us to solve problems efficiently, so it is important to know the main types you will need.\\
1132 |
1133 | \textbf{Lists}: or arrays, ordered sequences of objects, mutable
1134 | \begin{minted}{python}
1135 | >>> l = [42, 3.14, "hello","world"]
1136 | \end{minted}
1137 |
1138 | \textbf{Tuples}: like lists, but immutable
1139 | \begin{minted}{python}
1140 | >>> t = (42, 3.14, "hello","world")
1141 | \end{minted}
1142 |
1143 | \textbf{Dictionaries}: hash tables, key-value pairs, unsorted
1144 | \begin{minted}{python}
1145 | >>> d = {"life": 42, "pi": 3.14}
1146 | \end{minted}
1147 |
1148 | \textbf{Sets}: mutable, unordered sequence of unique elements. frozensets are just immutable sets
1149 | \begin{minted}{python}
1150 | >>> s = set([42, 3.14, "hello","world"])
1151 | \end{minted}
1152 |
1153 | {\color{blue} \textbf{Collections Module}}\\
1154 | \textbf{deque}: double-ended queue, generalization of stacks and queues; supports append, appendleft, pop, popleft, rotate, etc
1155 | \begin{minted}{python}
1156 | >>> s = deque([42, 3.14, "hello","world"])
1157 | \end{minted}
1158 |
1159 | \textbf{Counter}: dict subclass, unordered collection where elements are stored as keys and counts stored as values
1160 | \begin{minted}{python}
1161 | >>> c = Counter('apple')
1162 | >>> print(c)
1163 | Counter({'p': 2, 'a': 1, 'l': 1, 'e': 1})
1164 | \end{minted}
1165 |
1166 | {\color{blue} \textbf{heapq Module}}\\
1167 | \textbf{Heap Queue}: priority queue; heapq implements a min-heap, a binary tree in which every parent node has a value less than or equal to any of its children, so the smallest element is always at the root; supports push, pop, pushpop, heapify, replace functionality
1168 | \begin{minted}{python}
1169 | >>> heap = []
1170 | >>> for n in data:
1171 | ... heappush(heap, n)
1172 | >>> heap
1173 | [0, 1, 3, 6, 2, 8, 4, 7, 9, 5]
1174 | \end{minted}
1175 |
1176 | \includegraphics[width=\textwidth, height=3.5cm]{Figures/heaps.png}
1177 | \end{minipage}
1178 | };
1179 | \node[fancytitle, right=10pt] at (box.north west) {Python- Data Structures};
1180 | \end{tikzpicture}
1181 |
1182 |
1183 |
1184 | % ------------------------------------------------------------------------
1185 | % ------------ Recommended Resources -----------------
1186 | \begin{tikzpicture}
1187 | \node [mybox] (box){%
1188 | \begin{minipage}{0.3\textwidth}
1189 | \setlist{nolistsep}
1190 |
1191 | \begin{itemize}
1192 | \item Data Science Design Manual\\
1193 | (\url{www.springer.com/us/book/9783319554433})
1194 | \item Introduction to Statistical Learning \\
1195 | (\url{www-bcf.usc.edu/~gareth/ISL/})
1196 | \item Probability Cheatsheet \\(\url{www.wzchen.com/probability-cheatsheet/})
1197 | \item Google's Machine Learning Crash Course\\
1198 | (\url{developers.google.com/machine-learning/crash-course/})
1199 |
1200 | \end{itemize}
1201 |
1202 | % \includegraphics[width=\textwidth]{}
1203 |
1204 | \end{minipage}
1205 | };
1206 | \node[fancytitle, right=10pt] at (box.north west) {Recommended Resources};
1207 | \end{tikzpicture}
1208 | % ------------------------------------------------------------------------
1209 |
1210 |
1211 |
1212 | \end{multicols*}
1213 | \end{document}
1216 |
--------------------------------------------------------------------------------