├── README.md
├── GradientDescent_starterCode.ipynb
└── GradientDescent_completeCode.ipynb
/README.md:
--------------------------------------------------------------------------------
1 | # GradientDescent
2 | a quick exercise to practice gradient descent
3 |
4 | [Watch on YouTube](https://youtu.be/jwStsp8JUPU)
5 |
6 |
7 |
8 | This repository was created to accommodate my YouTube tutorial:
9 |
10 | https://youtu.be/jwStsp8JUPU
11 |
12 |
13 | content:
14 |
15 | - GradientDescent_starterCode: a fun exercise where you can test your understanding of gradient descent (see the update-rule sketch at the end of this README).
16 | - GradientDescent_completeCode: the full solution, in case you couldn't figure it out on your own.
17 |
18 | author: Mariya Sha
19 |
20 |
21 | dependencies:
22 |
23 | - NumPy
24 |
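25 |
26 | update rule:
27 |
28 | The starter notebook asks you to fill in the per-sample update that the complete notebook implements: each weight moves by learnrate * (y - pred) * x_i, and the bias moves by learnrate * (y - pred). Below is a minimal sketch of the same rule in vectorized NumPy form (the notebooks use an equivalent Python loop):
29 |
30 | ```python
31 | import numpy as np
32 |
33 | def gradient_descent(x, y, weights, bias, learnrate, pred):
34 |     # shift the parameters in the direction that shrinks the error (y - pred)
35 |     weights = weights + learnrate * (y - pred) * x
36 |     bias = bias + learnrate * (y - pred)
37 |     return weights, bias
38 |
39 | # example with the first sample and starting values from the notebooks
40 | x = np.array([0.1, 0.5, 0.2])
41 | weights = np.array([0.4, 0.2, 0.6])
42 | y, bias, learnrate = 0, 0.5, 0.1
43 | pred = 1 / (1 + np.exp(-(np.dot(x, weights) + bias)))  # sigmoid prediction
44 | weights, bias = gradient_descent(x, y, weights, bias, learnrate, pred)
45 | ```
46 |
47 | Because y = 0 here and pred is above 0.5, the update pushes the weights and bias down, nudging the next prediction toward the target.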
--------------------------------------------------------------------------------
/GradientDescent_starterCode.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Gradient Descent\n",
8 | "\n",
9 | "#### STARTER CODE - please modify the gradient descent function!!!\n",
10 | "\n",
11 | "as shown in the following tutorial: https://youtu.be/jwStsp8JUPU\n",
12 | "
\n",
13 | "by: Mariya Sha\n",
14 | "
\n",
15 | "enjoy! :)"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "import numpy as np\n",
25 | "\n",
26 | "# Activation Function\n",
27 | "def sigmoid(w_sum):\n",
28 | " return 1/(1+np.exp(-w_sum))\n",
29 | "\n",
30 | "# Get Prediction\n",
31 | "def predict(features, weights, bias):\n",
32 | " return sigmoid(np.dot(features, weights) + bias)\n",
33 | "\n",
34 | "# Loss Function\n",
35 | "def cross_entropy(target, pred):\n",
36 | " return -(target*np.log10(pred)+(1-target)*(np.log10(1-pred)))\n",
37 | "\n",
38 | "# Update Weights\n",
39 | "def gradient_descent(x, y, weights, bias, learnrate, pred):\n",
40 | " '''\n",
41 | " argument data types:\n",
42 | " feature, weights = lists with 3 items\n",
43 | " target = integer (0 or 1)\n",
44 | " prediction, l_rate, bias = floating point numbers\n",
45 | " '''\n",
46 | " return print(\"please define the function\")\n",
47 | "\n",
48 | "# Data\n",
49 | "features = np.array(([0.1,0.5,0.2],[0.2,0.3,0.1],[0.7,0.4,0.2],[0.1,0.4,0.3]))\n",
50 | "targets = np.array([0,1,0,1])\n",
51 | "epochs = 10\n",
52 | "learnrate = 0.1\n",
53 | "errors = []\n",
54 | "weights = np.array([0.4, 0.2, 0.6])\n",
55 | "bias = 0.5\n",
56 | "\n",
57 | "new_weights = []\n",
58 | "\n",
59 | "for e in range(epochs):\n",
60 | " for x, y in zip(features, targets):\n",
61 | " pred = predict(x, weights, bias)\n",
62 | " error = cross_entropy(y, pred)\n",
63 | " #weights, bias = gradient_descent(x, y, weights, bias, learnrate, pred)\n",
64 | " \n",
65 | " # Printing out the log-loss error on the training set\n",
66 | " out = predict(features, weights, bias)\n",
67 | " loss = np.mean(cross_entropy(targets, out))\n",
68 | " errors.append(loss)\n",
69 | " print(\"\\n========== Epoch\", e,\"==========\")\n",
70 | " print(\"Average loss: \", loss)\n"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": []
79 | }
80 | ],
81 | "metadata": {
82 | "kernelspec": {
83 | "display_name": "Python 3 (ipykernel)",
84 | "language": "python",
85 | "name": "python3"
86 | },
87 | "language_info": {
88 | "codemirror_mode": {
89 | "name": "ipython",
90 | "version": 3
91 | },
92 | "file_extension": ".py",
93 | "mimetype": "text/x-python",
94 | "name": "python",
95 | "nbconvert_exporter": "python",
96 | "pygments_lexer": "ipython3",
97 | "version": "3.9.7"
98 | }
99 | },
100 | "nbformat": 4,
101 | "nbformat_minor": 4
102 | }
103 |
--------------------------------------------------------------------------------
/GradientDescent_completeCode.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Gradient Descent\n",
8 | "#### COMPLETE CODE\n",
9 | "\n",
10 | "\n",
11 | "as shown in the following tutorial: https://youtu.be/jwStsp8JUPU\n",
12 | "
\n",
13 | "by: Mariya Sha\n",
14 | "
\n",
15 | "enjoy! :)"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 2,
21 | "metadata": {},
22 | "outputs": [
23 | {
24 | "name": "stdout",
25 | "output_type": "stream",
26 | "text": [
27 | "\n",
28 | "========== Epoch 0 ==========\n",
29 | "Average loss: 0.33944294852408824\n",
30 | "\n",
31 | "========== Epoch 1 ==========\n",
32 | "Average loss: 0.33147083470744965\n",
33 | "\n",
34 | "========== Epoch 2 ==========\n",
35 | "Average loss: 0.32499752814643046\n",
36 | "\n",
37 | "========== Epoch 3 ==========\n",
38 | "Average loss: 0.31973828925825093\n",
39 | "\n",
40 | "========== Epoch 4 ==========\n",
41 | "Average loss: 0.3154527992144233\n",
42 | "\n",
43 | "========== Epoch 5 ==========\n",
44 | "Average loss: 0.31194241308286647\n",
45 | "\n",
46 | "========== Epoch 6 ==========\n",
47 | "Average loss: 0.30904555254773175\n",
48 | "\n",
49 | "========== Epoch 7 ==========\n",
50 | "Average loss: 0.3066324010104734\n",
51 | "\n",
52 | "========== Epoch 8 ==========\n",
53 | "Average loss: 0.30459963557320424\n",
54 | "\n",
55 | "========== Epoch 9 ==========\n",
56 | "Average loss: 0.30286560086854775\n"
57 | ]
58 | }
59 | ],
60 | "source": [
61 | "import numpy as np\n",
62 | "\n",
63 | "# Activation Function\n",
64 | "def sigmoid(w_sum):\n",
65 | " return 1/(1+np.exp(-w_sum))\n",
66 | "\n",
67 | "# Get Prediction\n",
68 | "def predict(features, weights, bias):\n",
69 | " return sigmoid(np.dot(features, weights) + bias)\n",
70 | "\n",
71 | "# Loss Function\n",
72 | "def cross_entropy(target, pred):\n",
73 | " return -(target*np.log10(pred)+(1-target)*(np.log10(1-pred)))\n",
74 | "\n",
75 | "# Update Weights\n",
76 | "def gradient_descent(x, y, weights, bias, learnrate, pred):\n",
77 | " new_weights = []\n",
78 | " bias += learnrate*(y-pred)\n",
79 | "\n",
80 | " for w,xi in zip(weights,x):\n",
81 | " new_weight = w + learnrate*(y-pred)*xi\n",
82 | " new_weights.append(new_weight) \n",
83 | " return new_weights, bias\n",
84 | "\n",
85 | "# Data\n",
86 | "features = np.array(([0.1,0.5,0.2],[0.2,0.3,0.1],[0.7,0.4,0.2],[0.1,0.4,0.3]))\n",
87 | "targets = np.array([0,1,0,1])\n",
88 | "\n",
89 | "epochs = 10\n",
90 | "learnrate = 0.1\n",
91 | " \n",
92 | "errors = []\n",
93 | "weights = np.array([0.4, 0.2, 0.6])\n",
94 | "bias = 0.5\n",
95 | "\n",
96 | "new_weights = []\n",
97 | "\n",
98 | "for e in range(epochs):\n",
99 | " for x, y in zip(features, targets):\n",
100 | " pred = predict(x, weights, bias)\n",
101 | " error = cross_entropy(y, pred)\n",
102 | " weights, bias = gradient_descent(x, y, weights, bias, learnrate, pred)\n",
103 | " \n",
104 | " # Printing out the log-loss error on the training set\n",
105 | " out = predict(features, weights, bias)\n",
106 | " loss = np.mean(cross_entropy(targets, out))\n",
107 | " errors.append(loss)\n",
108 | " print(\"\\n========== Epoch\", e,\"==========\")\n",
109 | " print(\"Average loss: \", loss)\n"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {},
116 | "outputs": [],
117 | "source": []
118 | }
119 | ],
120 | "metadata": {
121 | "kernelspec": {
122 | "display_name": "Python 3 (ipykernel)",
123 | "language": "python",
124 | "name": "python3"
125 | },
126 | "language_info": {
127 | "codemirror_mode": {
128 | "name": "ipython",
129 | "version": 3
130 | },
131 | "file_extension": ".py",
132 | "mimetype": "text/x-python",
133 | "name": "python",
134 | "nbconvert_exporter": "python",
135 | "pygments_lexer": "ipython3",
136 | "version": "3.9.7"
137 | }
138 | },
139 | "nbformat": 4,
140 | "nbformat_minor": 4
141 | }
142 |
--------------------------------------------------------------------------------