├── .gitignore
├── 100_3DayPullbacks.ipynb
├── 102_DeltaHedging.ipynb
├── 103_PoissonJumps.ipynb
├── 104_TrackingError.ipynb
├── 105_CalmarRatio.ipynb
├── 106_ThetaDecay.ipynb
├── 107_RiskOfRuin.ipynb
├── 109_Expectancy.ipynb
├── 10_SharpeRatio.ipynb
├── 111_PrincipalComponentsRegression.ipynb
├── 112_LlamaIndexFinancialStatement.ipynb
├── 113_VectorBTPairsTrading.ipynb
├── 114_SortinoRatio.ipynb
├── 115_PandasMarketDataAnalysis.ipynb
├── 116_AutomateWithIBAPI.ipynb
├── 117_PortfolioVariance.ipynb
├── 119_FinancialRatiosInADB.ipynb
├── 11_InformationRatio.ipynb
├── 120_PortfolioHedge.ipynb
├── 121_RiskBudgets.ipynb
├── 122_MonthEndFlowEffects.ipynb
├── 123_PolarsDuckDB.ipynb
├── 124_IB_InsyncOptionsData.ipynb
├── 125_BlackLitterman.ipynb
├── 126_PyfolioReloaded.ipynb
├── 128_LlamaIndexStockPrices.ipynb
├── 129_FamaFrenchHedging.ipynb
├── 12_GARCH.ipynb
├── 130_PlottingWithMatplotlib.ipynb
├── 131_ReplicateQQQ.ipynb
├── 132_Autocorrelation.ipynb
├── 133_ReportAgent.ipynb
├── 134_MarkowitzMeanVariance.ipynb
├── 136_0DTE.ipynb
├── 137_PDFToStrategy.ipynb
├── 139_0DTELevelsStrategy.ipynb
├── 13_SortinoRatio.ipynb
├── 140_OptionsWithPolars.ipynb
├── 141_AcceleratedPCA.ipynb
├── 142_DollarNeutral.ipynb
├── 144_EquityResearch.ipynb
├── 145_TailRatio.ipynb
├── 146_SkewKurtosis.ipynb
├── 148_OptionsSkew.ipynb
├── 149_VolatilityTermStructure.ipynb
├── 14_RiskParity.ipynb
├── 150_SharpeOptimization.ipynb
├── 151_MeanReversion.ipynb
├── 152_MeanReversion.ipynb
├── 15_StockPriceDatabase.ipynb
├── 16_OmegaRatio.ipynb
├── 19_VolatilitySurface.ipynb
├── 20_BetaHedge.ipynb
├── 21_PairsTrading.ipynb
├── 22_PortfolioPCA.ipynb
├── 23_ConditionalValueAtRisk.ipynb
├── 24_FactorAnalysis.ipynb
├── 25_KalmanFilter.ipynb
├── 26_HurstExponent.ipynb
├── 27_Decomposition.ipynb
├── 28_FlowEffectsBacktrader.ipynb
├── 2_RealizedVolatilityModels.ipynb
├── 30_AnimatedYieldCurve.ipynb
├── 31_BarrierOptionPricing.ipynb
├── 32_KellyBetting.ipynb
├── 34_ZiplineBacktesting.ipynb
├── 35_TechnicalAnalysis.ipynb
├── 36_TreynorRatio.ipynb
├── 37_FiltersForForex.ipynb
├── 38_AmericanOptionPricer.ipynb
├── 3_FlowEffects.ipynb
├── 41_ThetaDataStreamingStraddle.ipynb
├── 42_VectorbtWalkforwardAnalysis.ipynb
├── 43_OpenBBandRiskParity.ipynb
├── 44_ZiplinePipeline.ipynb
├── 45_AlphaLens.ipynb
├── 46_CalmarRatio.ipynb
├── 47_LangChainPDF.ipynb
├── 48_KMeans.ipynb
├── 49_LangChainSQL.ipynb
├── 4_VolatilityCones.ipynb
├── 50_FactorEngineeringML.ipynb
├── 51_IVTermStructureSkew.ipynb
├── 52_AssetClassTrendFollowing.ipynb
├── 53_PolarsVsPandas.ipynb
├── 54_BlackLitterman.ipynb
├── 55_DownsideDeviation.ipynb
├── 56_UpsideCapture.ipynb
├── 58_BattingAverage.ipynb
├── 59_StoringDataHDF5.ipynb
├── 5_ValueAtRisk.ipynb
├── 60_VectorbtOptimizeOrderType.ipynb
├── 62_VectobtPatterns.ipynb
├── 64_LangChainSentimentAnalysis.ipynb
├── 65_KMedoids.ipynb
├── 67_SABR.ipynb
├── 68_VectorbtProjections.ipynb
├── 6_RollingZScore.ipynb
├── 73_OptionsBacktesting.ipynb
├── 74_VectorbtParamCV.ipynb
├── 75_ArcticDBOptions.ipynb
├── 78_ValueAtRiskMonteCarlo.ipynb
├── 79_PCAYieldCurve.ipynb
├── 7_Drawdown.ipynb
├── 80_VectorBTFlowEffects.ipynb
├── 81_EdgarFundamentalData.ipynb
├── 82_HierarchicalRiskParity.ipynb
├── 83_VectorBTMFT.ipynb
├── 84_ForecastEarningsWithStraddles.ipynb
├── 85_SkfolioMaxDiversification.ipynb
├── 88_QuantLibGreeks.ipynb
├── 89_OptimalPortfolioAgainstIndex.ipynb
├── 8_GeometricBrownianMotion.ipynb
├── 90_MarkovChainsRegime.ipynb
├── 92_Autoencoders.ipynb
├── 94_MPLFinanceForStockCharts.ipynb
├── 96_SectorHedge.ipynb
├── 97_VolatilitySwaps.ipynb
├── 98_HestonModel.ipynb
├── 99_ImpliedVolatilityWithQuantLib.ipynb
├── README.md
├── nvda.pdf
└── pqn.png
/.gitignore:
--------------------------------------------------------------------------------
1 | # Jupyter Notebook
2 | .ipynb_checkpoints
3 |
4 | # Mac stuff
5 | .DS_Store
6 | .virtual_documents
7 |
--------------------------------------------------------------------------------
/109_Expectancy.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "2b725ab1",
6 | "metadata": {},
7 | "source": [
 8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "c686b10c",
14 | "metadata": {},
15 | "source": [
16 | "This code calculates the expectancy ratio of a series of trades. The expectancy ratio measures the average expected return per trade by considering the win rate, loss rate, and average profit/loss of trades. It is useful in financial trading to evaluate the performance of a trading strategy. The input is a DataFrame of trades with profit or loss values. The output is a single expectancy ratio value."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "652ddb60",
23 | "metadata": {
24 | "lines_to_next_cell": 1
25 | },
26 | "outputs": [],
27 | "source": [
28 | "import pandas as pd\n",
29 | "import numpy as np"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "id": "f54264f9",
35 | "metadata": {},
36 | "source": [
37 | "Define a function to calculate the expectancy ratio of trades."
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "d056583b",
44 | "metadata": {
45 | "lines_to_next_cell": 1
46 | },
47 | "outputs": [],
48 | "source": [
49 | "def calculate_expectancy_ratio(trades):\n",
50 | " \"\"\"Calculate the expectancy ratio of trades.\n",
51 | " \n",
52 | " This function computes the average expected return for a series of trades \n",
53 | " by considering their win rate, loss rate, and average profit/loss.\n",
54 | " \n",
55 | " Parameters\n",
56 | " ----------\n",
57 | " trades : pd.DataFrame\n",
58 | " DataFrame containing trade information with a 'Profit' column.\n",
59 | " \n",
60 | " Returns\n",
61 | " -------\n",
62 | " expectancy_ratio : float\n",
63 | " The calculated expectancy ratio.\n",
64 | " \"\"\"\n",
65 | " \n",
66 | " # Calculate the number of trades\n",
67 | " num_trades = len(trades)\n",
68 | " \n",
69 | " # Separate winning and losing trades\n",
70 | " winners = trades[trades['Profit'] > 0]\n",
71 | " losers = trades[trades['Profit'] <= 0]\n",
72 | " \n",
73 | " # Calculate win rate and loss rate\n",
74 | " win_rate = len(winners) / num_trades\n",
75 | " loss_rate = len(losers) / num_trades\n",
76 | " \n",
77 | " # Calculate average profit for winning trades and average loss for losing trades\n",
78 | " avg_win = winners['Profit'].mean()\n",
79 | " avg_loss = losers['Profit'].mean()\n",
80 | " \n",
81 | " # Compute the expectancy ratio\n",
82 | " expectancy_ratio = (win_rate * avg_win) + (loss_rate * avg_loss)\n",
83 | " \n",
84 | " return expectancy_ratio"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "id": "2611f9e5",
90 | "metadata": {},
91 | "source": [
92 | "Create a dictionary with trade data including trade numbers and corresponding profits/losses."
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "id": "7c26a120",
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "trade_data = {\n",
103 | " 'Trade': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n",
104 | " 'Profit': [100, -50, 200, -100, 300, -150, 400, -200, 500, -250]\n",
105 | "}"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "id": "b84b5061",
111 | "metadata": {},
112 | "source": [
113 | "Convert the trade data dictionary into a pandas DataFrame."
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "id": "3f057971",
120 | "metadata": {},
121 | "outputs": [],
122 | "source": [
123 | "trades = pd.DataFrame(trade_data)"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "id": "4ce371e5",
129 | "metadata": {},
130 | "source": [
131 | "Calculate the expectancy ratio using the defined function and print the result."
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "id": "4086f225",
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "expectancy_ratio = calculate_expectancy_ratio(trades)\n",
142 | "print(f\"Expectancy Ratio: {expectancy_ratio}\")"
143 | ]
144 | },
145 | {
146 | "cell_type": "markdown",
147 | "id": "49299264",
148 | "metadata": {},
149 | "source": [
150 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
151 | ]
152 | }
153 | ],
154 | "metadata": {
155 | "jupytext": {
156 | "cell_metadata_filter": "-all",
157 | "main_language": "python",
158 | "notebook_metadata_filter": "-all"
159 | }
160 | },
161 | "nbformat": 4,
162 | "nbformat_minor": 5
163 | }
164 |
--------------------------------------------------------------------------------
/10_SharpeRatio.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "1ed30b1e",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "550cc5f3",
14 | "metadata": {},
15 | "source": [
16 | "This code downloads historical price data for SPY and AAPL from Yahoo Finance, calculates daily returns, and computes the Sharpe ratio for these returns. It includes a function to determine the Sharpe ratio, adjusting for a daily benchmark return. The code then plots the rolling 30-day Sharpe ratio for AAPL and visualizes the histogram of these Sharpe ratios. Additionally, it compares the rolling 30-day Sharpe ratio of AAPL against SPY and plots the histogram of the differences."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "dcabe4c7",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import yfinance as yf\n",
27 | "import numpy as np"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "id": "46f6a31c",
33 | "metadata": {},
34 | "source": [
35 | "Download historical price data for SPY and AAPL from Yahoo Finance"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "id": "2a83f550",
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "data = yf.download([\"SPY\", \"AAPL\"], start=\"2020-01-01\", end=\"2022-07-31\")"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "id": "2018e200",
51 | "metadata": {},
52 | "source": [
53 | "Extract adjusted closing prices for SPY and AAPL"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "c977a8bc",
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "closes = data['Adj Close']\n",
64 | "spy_returns = closes.SPY.pct_change().dropna()\n",
65 | "aapl_returns = closes.AAPL.pct_change().dropna()"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "id": "9741f742",
71 | "metadata": {},
72 | "source": [
73 | "Define a function to calculate the Sharpe ratio of a strategy"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "f618f290",
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "def sharpe_ratio(returns, adjustment_factor=0.0):\n",
84 | " \"\"\"\n",
85 | " Determines the Sharpe ratio of a strategy.\n",
86 | " \n",
87 | " Parameters\n",
88 | " ----------\n",
89 | " returns : pd.Series or np.ndarray\n",
90 | " Daily returns of the strategy, noncumulative.\n",
91 | " adjustment_factor : int, float\n",
92 | " Constant daily benchmark return throughout the period.\n",
93 | "\n",
94 | " Returns\n",
95 | " -------\n",
96 | " sharpe_ratio : float\n",
97 | "\n",
98 | " Note\n",
99 | " -----\n",
100 | " See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.\n",
101 | " \"\"\"\n",
102 | "\n",
103 | " # Adjust returns by subtracting the benchmark return\n",
104 | "\n",
105 | " returns_risk_adj = returns - adjustment_factor\n",
106 | "\n",
107 | " # Print the annualized standard deviation of the risk-adjusted returns\n",
108 | "\n",
109 | " print(returns_risk_adj.std() * np.sqrt(252))\n",
110 | "\n",
111 | " # Return the annualized Sharpe ratio\n",
112 | "\n",
113 | " return (\n",
114 | " returns_risk_adj.mean() / returns_risk_adj.std()\n",
115 | " ) * np.sqrt(252)"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "id": "035cea96",
121 | "metadata": {},
122 | "source": [
123 | "Calculate the Sharpe ratio for SPY daily returns"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "id": "23dfb39c",
130 | "metadata": {},
131 | "outputs": [],
132 | "source": [
133 | "sharpe_ratio(spy_returns)"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "id": "19aebd83",
139 | "metadata": {},
140 | "source": [
141 | "Calculate the Sharpe ratio for AAPL daily returns"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "id": "12292244",
148 | "metadata": {},
149 | "outputs": [],
150 | "source": [
151 | "sharpe_ratio(aapl_returns)"
152 | ]
153 | },
154 | {
155 | "cell_type": "markdown",
156 | "id": "77252a24",
157 | "metadata": {},
158 | "source": [
159 | "Plot the rolling 30-day Sharpe ratio for AAPL"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "id": "75215b9c",
166 | "metadata": {},
167 | "outputs": [],
168 | "source": [
169 | "aapl_returns.rolling(30).apply(sharpe_ratio).plot()"
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "id": "5ac119f9",
175 | "metadata": {},
176 | "source": [
177 | "Plot the histogram of the rolling 30-day Sharpe ratios for AAPL"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "id": "bae6269a",
184 | "metadata": {},
185 | "outputs": [],
186 | "source": [
187 | "aapl_returns.rolling(30).apply(sharpe_ratio).hist(bins=50)"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "id": "3b70d6ff",
193 | "metadata": {},
194 | "source": [
195 | "Compare the rolling 30-day Sharpe ratio of AAPL against SPY and plot the histogram of the differences"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": null,
201 | "id": "0796ca2e",
202 | "metadata": {},
203 | "outputs": [],
204 | "source": [
205 | "(\n",
206 | " aapl_returns.rolling(30).apply(sharpe_ratio)\n",
207 | " - spy_returns.rolling(30).apply(sharpe_ratio)\n",
208 | ").hist(bins=50)"
209 | ]
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "id": "cef5bf53",
214 | "metadata": {},
215 | "source": [
216 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
217 | ]
218 | }
219 | ],
220 | "metadata": {
221 | "jupytext": {
222 | "cell_metadata_filter": "-all",
223 | "main_language": "python",
224 | "notebook_metadata_filter": "-all"
225 | },
226 | "kernelspec": {
227 | "display_name": "Python 3 (ipykernel)",
228 | "language": "python",
229 | "name": "python3"
230 | },
231 | "language_info": {
232 | "codemirror_mode": {
233 | "name": "ipython",
234 | "version": 3
235 | },
236 | "file_extension": ".py",
237 | "mimetype": "text/x-python",
238 | "name": "python",
239 | "nbconvert_exporter": "python",
240 | "pygments_lexer": "ipython3",
241 | "version": "3.10.13"
242 | }
243 | },
244 | "nbformat": 4,
245 | "nbformat_minor": 5
246 | }
247 |
--------------------------------------------------------------------------------
/139_0DTELevelsStrategy.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "02a51520",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "cf893a63",
14 | "metadata": {},
15 | "source": [
16 | "These libraries help us analyze financial data and create visualizations"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "8afdfe22",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import pandas as pd\n",
27 | "import numpy as np\n",
28 | "import matplotlib.pyplot as plt\n",
29 | "import yfinance as yf"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "id": "4282946b",
35 | "metadata": {},
36 | "source": [
37 | "### Load and prepare our data"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "id": "d7192e40",
43 | "metadata": {},
44 | "source": [
45 | "We start by loading our data from a CSV file and preparing it for analysis. The file contains many useful columns with Gamma levels. We'll focus on the 0DTE (immediate expiration) support and resistance levels."
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "id": "0c483412",
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "df = pd.read_csv('spy_levels.csv')\n",
56 | "df['Date'] = pd.to_datetime(df['Date'])\n",
57 | "df.set_index('Date', inplace=True)\n",
58 | "levels = df[[\"Call Resistance 0DTE\", \"Put Support 0DTE\"]]\n",
59 | "levels.columns = [\"resistance\", \"support\"]"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "id": "2b45beea",
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "spy = yf.download(\"SPY\", start=levels.index.min(), end=levels.index.max()).Close\n",
70 | "levels = levels.copy()\n",
71 | "levels.loc[:, \"spy\"] = spy\n",
72 | "levels[[\"resistance\", \"support\"]] = levels[[\"resistance\", \"support\"]].shift()\n",
73 | "levels.dropna(inplace=True)"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "id": "ead26499",
79 | "metadata": {},
80 | "source": [
81 | "We load our data from a CSV file and convert the 'Date' column to datetime. We set the 'Date' as the index and select specific columns for our analysis. We then download SPY (S&P 500 ETF) closing prices using yfinance and add them to our dataset. Because the signals generated apply to the following trading day, we shift them one day forward. Finally, we remove any rows with missing data to ensure our analysis is based on complete information."
82 | ]
83 | },
84 | {
85 | "cell_type": "markdown",
86 | "id": "78ec4be9",
87 | "metadata": {},
88 | "source": [
89 | "### Implement our trading strategy"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "id": "adc3ca84",
95 | "metadata": {},
96 | "source": [
97 | "Next, we define our trading strategy based on support and resistance levels."
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": null,
103 | "id": "24148f51",
104 | "metadata": {},
105 | "outputs": [],
106 | "source": [
107 | "strategy = levels.copy()\n",
108 | "strategy[\"position\"] = 0"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "id": "5b0f8486",
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "strategy.loc[strategy.spy >= strategy.resistance, 'position'] = -1\n",
119 | "strategy.loc[strategy.spy <= strategy.support, 'position'] = 1"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": null,
125 | "id": "864c442b",
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "strategy.position = (\n",
130 | " strategy\n",
131 | " .position\n",
132 | " .replace(0, np.nan)\n",
133 | " .ffill()\n",
134 | " .fillna(0)\n",
135 | ")"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "id": "e3fc39e2",
142 | "metadata": {},
143 | "outputs": [],
144 | "source": [
145 | "strategy[\"spy_returns\"] = strategy.spy.pct_change()\n",
146 | "strategy[\"strategy_returns\"] = strategy.spy_returns * strategy.position.shift(1)"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "id": "b3e4e721",
152 | "metadata": {},
153 | "source": [
154 | "We create a copy of our data and add a 'position' column. We set our position to -1 (sell) when SPY price is above resistance and 1 (buy) when it's below support. We then forward-fill our positions to maintain them until the next signal. Finally, we calculate the returns for SPY and our strategy by multiplying SPY returns with our previous day's position."
155 | ]
156 | },
157 | {
158 | "cell_type": "markdown",
159 | "id": "1ca3ce8f",
160 | "metadata": {},
161 | "source": [
162 | "### Evaluate our strategy performance"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "id": "b04b4b54",
168 | "metadata": {},
169 | "source": [
170 | "Finally, we calculate cumulative returns and visualize our strategy's performance."
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "id": "c6d1c2b7",
177 | "metadata": {},
178 | "outputs": [],
179 | "source": [
180 | "strategy['cumulative_returns'] = (1 + strategy.strategy_returns).cumprod()\n",
181 | "np.sqrt(252) * strategy.strategy_returns.mean() / strategy.strategy_returns.std()"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "id": "7319ae37",
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "plt.figure(figsize=(12, 6))\n",
192 | "plt.plot(strategy.cumulative_returns, label='Strategy')\n",
193 | "plt.plot((1 + strategy.spy_returns).cumprod(), label='Buy and Hold')\n",
194 | "plt.legend()\n",
195 | "plt.title('Strategy Performance vs Buy and Hold')\n",
196 | "plt.ylabel('Cumulative Returns')\n",
197 | "plt.show()"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "id": "25ada3a2",
203 | "metadata": {},
204 | "source": [
205 | "We calculate the cumulative returns for our strategy and compute its Sharpe ratio. We then create a plot comparing our strategy's performance to a simple buy-and-hold approach. This visualization helps us understand how well our strategy performs compared to passive investing in the S&P 500."
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "id": "f13d1c6b",
211 | "metadata": {},
212 | "source": [
213 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
214 | ]
215 | }
216 | ],
217 | "metadata": {
218 | "jupytext": {
219 | "cell_metadata_filter": "-all",
220 | "main_language": "python",
221 | "notebook_metadata_filter": "-all"
222 | },
223 | "kernelspec": {
224 | "display_name": "Python 3 (ipykernel)",
225 | "language": "python",
226 | "name": "python3"
227 | },
228 | "language_info": {
229 | "codemirror_mode": {
230 | "name": "ipython",
231 | "version": 3
232 | },
233 | "file_extension": ".py",
234 | "mimetype": "text/x-python",
235 | "name": "python",
236 | "nbconvert_exporter": "python",
237 | "pygments_lexer": "ipython3",
238 | "version": "3.10.13"
239 | }
240 | },
241 | "nbformat": 4,
242 | "nbformat_minor": 5
243 | }
244 |
--------------------------------------------------------------------------------
/13_SortinoRatio.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e8e4c152",
6 | "metadata": {},
7 | "source": [
 8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "a57f8b45",
14 | "metadata": {},
15 | "source": [
16 | "This code downloads historical stock data for SPY and AAPL from Yahoo Finance and calculates daily returns. It then computes the Sortino ratio, a risk-adjusted performance metric, for these returns. The Sortino ratio focuses on downside risk, providing a better measure for evaluating strategies with asymmetric risk profiles. The code also visualizes the rolling Sortino ratio and its distribution for AAPL. This is useful for performance analysis and comparison of different investment strategies."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "8a9c9a42",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import yfinance as yf\n",
27 | "import numpy as np"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "id": "aac23142",
33 | "metadata": {},
34 | "source": [
35 | "Download historical adjusted closing prices for SPY and AAPL from Yahoo Finance"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "id": "e3627e86",
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "data = yf.download([\"SPY\", \"AAPL\"], start=\"2020-01-01\", end=\"2022-07-31\")"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "id": "778fee89",
51 | "metadata": {},
52 | "source": [
53 | "Extract adjusted closing prices and calculate daily returns for SPY and AAPL"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "41003991",
60 | "metadata": {
61 | "lines_to_next_cell": 1
62 | },
63 | "outputs": [],
64 | "source": [
65 | "closes = data['Adj Close']\n",
66 | "spy_returns = closes.SPY.pct_change().dropna()\n",
67 | "aapl_returns = closes.AAPL.pct_change().dropna()"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "id": "dbb043d7",
73 | "metadata": {},
74 | "source": [
75 | "Define a function to calculate the Sortino ratio for a series of returns"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "id": "fefe6265",
82 | "metadata": {
83 | "lines_to_next_cell": 1
84 | },
85 | "outputs": [],
86 | "source": [
87 | "def sortino_ratio(returns, adjustment_factor=0.0):\n",
88 | " \"\"\"\n",
89 | " Determines the Sortino ratio of a strategy.\n",
90 | " \n",
91 | " Parameters\n",
92 | " ----------\n",
93 | " returns : pd.Series or np.ndarray\n",
94 | " Daily returns of the strategy, noncumulative.\n",
95 | " adjustment_factor : int, float\n",
96 | " Constant daily benchmark return throughout the period.\n",
97 | "\n",
98 | " Returns\n",
99 | " -------\n",
100 | " sortino_ratio : float\n",
101 | "\n",
102 | " Note\n",
103 | " -----\n",
104 | "See https://en.wikipedia.org/wiki/Sortino_ratio for more details.\n",
106 | " \"\"\"\n",
107 | "\n",
108 | " # Adjust returns by subtracting the adjustment factor and compute average annual return\n",
109 | " returns_risk_adj = np.asanyarray(returns - adjustment_factor)\n",
110 | " average_annual_return = returns_risk_adj.mean() * 252\n",
111 | "\n",
112 | " # Compute downside deviation by considering only negative deviations\n",
113 | "downside_diff = np.clip(returns_risk_adj, -np.inf, 0)\n",
114 | " np.square(downside_diff, out=downside_diff)\n",
115 | " annualized_downside_deviation = np.sqrt(downside_diff.mean()) * np.sqrt(252)\n",
116 | "\n",
117 | " # Calculate and return the Sortino ratio\n",
118 | " return average_annual_return / annualized_downside_deviation"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "id": "334d33ea",
124 | "metadata": {},
125 | "source": [
126 | "Calculate the Sortino ratio for SPY's daily returns"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": null,
132 | "id": "0c281e5e",
133 | "metadata": {},
134 | "outputs": [],
135 | "source": [
136 | "sortino_ratio(spy_returns)"
137 | ]
138 | },
139 | {
140 | "cell_type": "markdown",
141 | "id": "cd306508",
142 | "metadata": {},
143 | "source": [
144 | "Calculate the Sortino ratio for AAPL's daily returns"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": null,
150 | "id": "a6d8b4a1",
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "sortino_ratio(aapl_returns)"
155 | ]
156 | },
157 | {
158 | "cell_type": "markdown",
159 | "id": "ea3d422f",
160 | "metadata": {},
161 | "source": [
162 | "Plot the rolling 30-day Sortino ratio for AAPL's returns"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "id": "b86b1ba0",
169 | "metadata": {},
170 | "outputs": [],
171 | "source": [
172 | "aapl_returns.rolling(30).apply(sortino_ratio).plot()"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "id": "bf0f5718",
178 | "metadata": {},
179 | "source": [
180 | "Plot the histogram of the rolling 30-day Sortino ratio for AAPL's returns"
181 | ]
182 | },
183 | {
184 | "cell_type": "code",
185 | "execution_count": null,
186 | "id": "dcf47150",
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "aapl_returns.rolling(30).apply(sortino_ratio).hist(bins=50)"
191 | ]
192 | },
193 | {
194 | "cell_type": "markdown",
195 | "id": "8eaa8c01",
196 | "metadata": {},
197 | "source": [
198 | "Plot the histogram of the difference between rolling 30-day Sortino ratios of AAPL and SPY"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": null,
204 | "id": "3f3b8abb",
205 | "metadata": {},
206 | "outputs": [],
207 | "source": [
208 | "(\n",
209 | " aapl_returns.rolling(30).apply(sortino_ratio)\n",
210 | " - spy_returns.rolling(30).apply(sortino_ratio)\n",
211 | ").hist(bins=50)"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "id": "7ffcdc8a",
217 | "metadata": {},
218 | "source": [
219 | "Calculate the Sortino ratio for SPY's daily returns again"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "id": "221fcbf5",
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "sortino_ratio(spy_returns)"
230 | ]
231 | },
232 | {
233 | "cell_type": "markdown",
234 | "id": "ac229400",
235 | "metadata": {},
236 | "source": [
237 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
238 | ]
239 | }
240 | ],
241 | "metadata": {
242 | "jupytext": {
243 | "cell_metadata_filter": "-all",
244 | "main_language": "python",
245 | "notebook_metadata_filter": "-all"
246 | }
247 | },
248 | "nbformat": 4,
249 | "nbformat_minor": 5
250 | }
251 |
--------------------------------------------------------------------------------
/145_TailRatio.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "0d4a1d64",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "0d75fc0a",
14 | "metadata": {},
15 | "source": [
16 | "These libraries help us download financial data, analyze it, and create visualizations"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "4c166733",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import matplotlib.pyplot as plt\n",
27 | "import numpy as np\n",
28 | "import pandas as pd\n",
29 | "import yfinance as yf"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "id": "3a841646",
35 | "metadata": {},
36 | "source": [
37 | "We download stock data for NVDA and AMD, calculate daily returns, and prepare it for analysis"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "c0d3a0f4",
44 | "metadata": {
45 | "lines_to_next_cell": 1
46 | },
47 | "outputs": [],
48 | "source": [
49 | "df = (\n",
50 | " yf.download([\"NVDA\", \"AMD\"], start=\"2020-01-01\")\n",
51 | " .Close.pct_change(fill_method=None)\n",
52 | " .dropna()\n",
53 | ")"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "id": "5748e2c2",
59 | "metadata": {},
60 | "source": [
61 | "This code fetches the closing prices for NVIDIA and AMD stocks from January 1, 2020, to the present. It then calculates the daily percentage returns and removes any rows with missing data. The resulting dataframe contains the daily returns for both stocks, which we'll use for our analysis."
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "id": "7ac3f2a8",
67 | "metadata": {},
68 | "source": [
69 | "### Calculate tail ratios"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "id": "5fdc085f",
75 | "metadata": {},
76 | "source": [
77 | "We define a function to compute the tail ratio and apply it to our stock data"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "id": "a1dcdb36",
84 | "metadata": {
85 | "lines_to_next_cell": 1
86 | },
87 | "outputs": [],
88 | "source": [
89 | "def tail_ratio(returns):\n",
90 | " return abs(np.percentile(returns, 95)) / abs(np.percentile(returns, 5))"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "id": "51f7d526",
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "tail_ratio_a = tail_ratio(df.AMD)\n",
101 | "tail_ratio_b = tail_ratio(df.NVDA)"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "id": "67322d51",
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "print(f\"Tail Ratio for AMD: {tail_ratio_a:.4f}\")\n",
112 | "print(f\"Tail Ratio for NVDA: {tail_ratio_b:.4f}\")"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "id": "5f0319c3",
118 | "metadata": {},
119 | "source": [
120 | "The tail ratio is a measure of the relative size of the positive and negative tails of a distribution. Our function calculates this by dividing the absolute value of the 95th percentile by the absolute value of the 5th percentile. We then apply this function to both AMD and NVIDIA returns and print the results. A higher tail ratio suggests more extreme positive returns relative to negative ones."
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "id": "8945b903",
126 | "metadata": {},
127 | "source": [
128 | "### Visualize return distributions"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "id": "5175a9a1",
134 | "metadata": {},
135 | "source": [
136 | "We create a histogram to compare the return distributions of AMD and NVIDIA"
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": null,
142 | "id": "fcd17588",
143 | "metadata": {},
144 | "outputs": [],
145 | "source": [
146 | "plt.figure(figsize=(10, 6))\n",
147 | "plt.hist(df.AMD, bins=50, alpha=0.5, label=\"AMD\")\n",
148 | "plt.hist(df.NVDA, bins=50, alpha=0.5, label=\"NVDA\")\n",
149 | "plt.axvline(np.percentile(df.AMD, 5), color=\"r\", linestyle=\"dashed\", linewidth=2)\n",
150 | "plt.axvline(np.percentile(df.AMD, 95), color=\"r\", linestyle=\"dashed\", linewidth=2)\n",
151 | "plt.axvline(np.percentile(df.NVDA, 5), color=\"g\", linestyle=\"dashed\", linewidth=2)\n",
152 | "plt.axvline(np.percentile(df.NVDA, 95), color=\"g\", linestyle=\"dashed\", linewidth=2)\n",
153 | "plt.legend()\n",
154 | "plt.title(\"Return Distributions with 5th and 95th Percentiles\")\n",
155 | "plt.xlabel(\"Returns\")\n",
156 | "plt.ylabel(\"Frequency\")\n",
157 | "plt.show()"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "id": "8faffbc1",
163 | "metadata": {},
164 | "source": [
165 | "This visualization creates overlapping histograms of the daily returns for AMD and NVIDIA. We use 50 bins to show the distribution of returns for each stock. The 5th and 95th percentiles are marked with dashed lines for each stock. This helps us visually compare the return distributions and see how the tail ratios we calculated earlier relate to the actual data."
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "id": "4f631a94",
171 | "metadata": {},
172 | "source": [
173 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
174 | ]
175 | }
176 | ],
177 | "metadata": {
178 | "jupytext": {
179 | "cell_metadata_filter": "-all",
180 | "main_language": "python",
181 | "notebook_metadata_filter": "-all"
182 | },
183 | "kernelspec": {
184 | "display_name": "Python 3 (ipykernel)",
185 | "language": "python",
186 | "name": "python3"
187 | },
188 | "language_info": {
189 | "codemirror_mode": {
190 | "name": "ipython",
191 | "version": 3
192 | },
193 | "file_extension": ".py",
194 | "mimetype": "text/x-python",
195 | "name": "python",
196 | "nbconvert_exporter": "python",
197 | "pygments_lexer": "ipython3",
198 | "version": "3.10.13"
199 | }
200 | },
201 | "nbformat": 4,
202 | "nbformat_minor": 5
203 | }
204 |
--------------------------------------------------------------------------------
/146_SkewKurtosis.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "07ba76ac",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "c79ba47f",
14 | "metadata": {},
15 | "source": [
16 | "These libraries help us fetch financial data, manipulate it, and visualize the results"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "b0d09dc9",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import yfinance as yf\n",
27 | "import pandas as pd\n",
28 | "import numpy as np\n",
29 | "import matplotlib.pyplot as plt\n",
30 | "from scipy import stats"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "id": "8fe8a9b7",
36 | "metadata": {},
37 | "source": [
38 | "### Fetch and prepare SPY data"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "id": "3de0ae8e",
44 | "metadata": {},
45 | "source": [
46 | "We'll get SPY data for the last 5 years and calculate its daily returns."
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "id": "effe5db9",
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "spy = yf.Ticker(\"SPY\")\n",
57 | "data = spy.history(period=\"5y\")\n",
58 | "returns = data['Close'].pct_change().dropna()"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "id": "7c971e77",
64 | "metadata": {},
65 | "source": [
66 | "We use yfinance to download SPY stock data for the past 5 years. Then, we calculate the daily percentage change in closing prices. This gives us the daily returns. We remove any missing values to ensure our data is clean and ready for analysis."
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "id": "f1a494d9",
72 | "metadata": {},
73 | "source": [
74 | "### Calculate distribution statistics"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "id": "bffd7af6",
80 | "metadata": {},
81 | "source": [
82 | "Now we'll compute the skewness and kurtosis of the returns."
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "id": "d29ddc33",
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "skewness = returns.skew()\n",
93 | "kurtosis = returns.kurtosis()"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "id": "d952d703",
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "print(f\"Skewness: {skewness:.4f}\")\n",
104 | "print(f\"Kurtosis: {kurtosis:.4f}\")"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "id": "c01fb8e0",
110 | "metadata": {},
111 | "source": [
112 | "We calculate two important measures of the returns distribution. Skewness tells us about the symmetry of the distribution. Kurtosis gives us information about the tails of the distribution. These values help us understand how the returns are distributed compared to a normal distribution."
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "id": "22b4c641",
118 | "metadata": {},
119 | "source": [
120 | "### Visualize the distribution"
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "id": "7af0cb80",
126 | "metadata": {},
127 | "source": [
128 | "Let's create a histogram of the returns and overlay a normal distribution for comparison."
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "id": "671ea1f6",
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "plt.figure(figsize=(10, 6))\n",
139 | "returns.hist(bins=50, density=True, alpha=0.7)\n",
140 | "xmin, xmax = plt.xlim()\n",
141 | "x = np.linspace(xmin, xmax, 100)\n",
142 | "p = stats.norm.pdf(x, returns.mean(), returns.std())\n",
143 | "plt.plot(x, p, 'k', linewidth=2)\n",
144 | "plt.title(\"SPY Daily Returns Distribution\")\n",
145 | "plt.xlabel(\"Returns\")\n",
146 | "plt.ylabel(\"Frequency\")\n",
147 | "plt.show()"
148 | ]
149 | },
150 | {
151 | "cell_type": "markdown",
152 | "id": "fa7b4bf5",
153 | "metadata": {},
154 | "source": [
155 | "We create a histogram of the returns to visually represent their distribution. We set the number of bins to 50 for a detailed view. We also calculate and plot a normal distribution curve using the mean and standard deviation of our returns. This allows us to compare our actual returns distribution to what a normal distribution would look like."
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "id": "eb689e9c",
161 | "metadata": {},
162 | "source": [
163 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
164 | ]
165 | }
166 | ],
167 | "metadata": {
168 | "jupytext": {
169 | "cell_metadata_filter": "-all",
170 | "main_language": "python",
171 | "notebook_metadata_filter": "-all"
172 | },
173 | "kernelspec": {
174 | "display_name": "Python 3 (ipykernel)",
175 | "language": "python",
176 | "name": "python3"
177 | },
178 | "language_info": {
179 | "codemirror_mode": {
180 | "name": "ipython",
181 | "version": 3
182 | },
183 | "file_extension": ".py",
184 | "mimetype": "text/x-python",
185 | "name": "python",
186 | "nbconvert_exporter": "python",
187 | "pygments_lexer": "ipython3",
188 | "version": "3.10.13"
189 | }
190 | },
191 | "nbformat": 4,
192 | "nbformat_minor": 5
193 | }
194 |
--------------------------------------------------------------------------------
/148_OptionsSkew.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "32e8250e",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "4253164f",
14 | "metadata": {},
15 | "source": [
16 | "These libraries help us fetch stock data, analyze options, and visualize results"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "f7798a7a",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import pandas as pd\n",
28 | "import yfinance as yf\n",
29 | "from matplotlib import pyplot as plt"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "id": "38ac5ded",
35 | "metadata": {},
36 | "source": [
37 | "### Fetch option data for PLTR"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "id": "c4c8160a",
43 | "metadata": {},
44 | "source": [
45 | "We'll start by getting the options chain for Palantir Technologies (PLTR) with an expiration date of July 18, 2025."
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "id": "5cd1367a",
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "ticker = \"PLTR\"\n",
56 | "stock = yf.Ticker(ticker)\n",
57 | "options_chain = stock.option_chain(\"2025-07-18\")"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "id": "909ed931",
64 | "metadata": {
65 | "lines_to_next_cell": 1
66 | },
67 | "outputs": [],
68 | "source": [
69 | "calls = options_chain.calls\n",
70 | "puts = options_chain.puts"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "id": "ccc58444",
76 | "metadata": {},
77 | "source": [
78 | "We use the yfinance library to fetch option data for PLTR. We create a Ticker object for PLTR and request its option chain for the specified date. The data is split into two DataFrames: one for call options and another for put options."
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "id": "42da7e5e",
84 | "metadata": {},
85 | "source": [
86 | "### Clean and process option data"
87 | ]
88 | },
89 | {
90 | "cell_type": "markdown",
91 | "id": "5cbbea20",
92 | "metadata": {},
93 | "source": [
94 | "Now we'll define functions to clean our data and build an implied volatility grid."
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "id": "003d4c50",
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "def clean(df):\n",
105 | " cols = [\"strike\", \"impliedVolatility\"]\n",
106 | " return (\n",
107 | " df[cols]\n",
108 | " .dropna(subset=[\"impliedVolatility\"])\n",
109 | " .drop_duplicates(\"strike\")\n",
110 | " .astype({\"strike\": float, \"impliedVolatility\": float})\n",
111 | " .sort_values(\"strike\")\n",
112 | " .set_index(\"strike\")\n",
113 | " )"
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "id": "dc55ae3b",
120 | "metadata": {
121 | "lines_to_next_cell": 1
122 | },
123 | "outputs": [],
124 | "source": [
125 | "def build_iv_grid(df):\n",
126 | " df_iv = clean(df)\n",
127 | " strike_grid = np.arange(df_iv.index.min(), df_iv.index.max() + 1, 1.0)\n",
128 | " return (\n",
129 | " df_iv.reindex(strike_grid)\n",
130 | " .interpolate(method=\"linear\")\n",
131 | " .rename_axis(\"strike\")\n",
132 | " .reset_index()\n",
133 | " )"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "id": "42583d58",
139 | "metadata": {},
140 | "source": [
141 | "We define two functions: clean and build_iv_grid. The clean function filters and formats our data, keeping only the strike price and implied volatility columns. The build_iv_grid function creates a uniform grid of strike prices and interpolates the implied volatility values. This helps us create a smooth volatility curve."
142 | ]
143 | },
144 | {
145 | "cell_type": "markdown",
146 | "id": "ee2d1545",
147 | "metadata": {},
148 | "source": [
149 | "### Plot volatility skew for calls and puts"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "id": "26959c4b",
155 | "metadata": {},
156 | "source": [
157 | "Finally, we'll create and plot the volatility skew for both call and put options."
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "id": "94de21ea",
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "call_skew = build_iv_grid(calls)"
168 | ]
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": null,
173 | "id": "4a09ac25",
174 | "metadata": {},
175 | "outputs": [],
176 | "source": [
177 | "plt.plot(call_skew.strike, call_skew.impliedVolatility)"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "id": "1e77dc57",
184 | "metadata": {},
185 | "outputs": [],
186 | "source": [
187 | "put_skew = build_iv_grid(puts)"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "id": "1ecd82a7",
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "plt.plot(put_skew.strike, put_skew.impliedVolatility)"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "id": "f22ed644",
203 | "metadata": {},
204 | "source": [
205 | "We apply our build_iv_grid function to both the call and put options data. This gives us a smooth curve of implied volatilities across different strike prices. We then plot these curves using matplotlib. The resulting graph shows the volatility skew for both call and put options, allowing us to visualize how implied volatility changes with the strike price."
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "id": "840e6746",
211 | "metadata": {},
212 | "source": [
213 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
214 | ]
215 | }
216 | ],
217 | "metadata": {
218 | "jupytext": {
219 | "cell_metadata_filter": "-all",
220 | "main_language": "python",
221 | "notebook_metadata_filter": "-all"
222 | },
223 | "kernelspec": {
224 | "display_name": "Python 3 (ipykernel)",
225 | "language": "python",
226 | "name": "python3"
227 | },
228 | "language_info": {
229 | "codemirror_mode": {
230 | "name": "ipython",
231 | "version": 3
232 | },
233 | "file_extension": ".py",
234 | "mimetype": "text/x-python",
235 | "name": "python",
236 | "nbconvert_exporter": "python",
237 | "pygments_lexer": "ipython3",
238 | "version": "3.10.13"
239 | }
240 | },
241 | "nbformat": 4,
242 | "nbformat_minor": 5
243 | }
244 |
--------------------------------------------------------------------------------
/149_VolatilityTermStructure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "ce13aec3",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "2cb060e3",
14 | "metadata": {},
15 | "source": [
16 | "These libraries help us fetch financial data, manipulate it, and create visualizations"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "1caa019d",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import pandas as pd\n",
28 | "from pandas.tseries.offsets import WeekOfMonth\n",
29 | "import yfinance as yf\n",
30 | "from matplotlib import pyplot as plt"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "id": "db21c2c7",
36 | "metadata": {},
37 | "source": [
38 | "### Fetch option data for our stock"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "id": "0794f098",
44 | "metadata": {},
45 | "source": [
46 | "We'll gather option data for a specific stock ticker and organize it into a dataframe."
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "id": "98f4d380",
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "ticker = \"SPY\"\n",
57 | "stock = yf.Ticker(ticker)\n",
58 | "expirations = stock.options"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "id": "fafcfbb1",
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "all_options = []\n",
69 | "for dt in expirations:\n",
70 | " options = stock.option_chain(dt)\n",
71 | " calls = options.calls\n",
72 | " calls['expirationDate'] = dt\n",
73 | " all_options.append(calls)"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "73a99b8e",
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "options_df = pd.concat(all_options)\n",
84 | "options_df['daysToExpiration'] = (pd.to_datetime(options_df['expirationDate']) - pd.Timestamp.now()).dt.days"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "id": "b877c9b2",
90 | "metadata": {},
91 | "source": [
92 | "We start by defining our stock ticker as \"SPY\" and use yfinance to fetch its data. We then loop through all available expiration dates, collecting call option data for each. We combine this data into a single dataframe and calculate the days to expiration for each option."
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "id": "835e069a",
98 | "metadata": {},
99 | "source": [
100 | "### Filter and clean our option data"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "id": "02c32b33",
106 | "metadata": {},
107 | "source": [
108 | "Now we'll filter our options to a specific strike price and clean up the data."
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "id": "03d85acf",
115 | "metadata": {
116 | "lines_to_next_cell": 1
117 | },
118 | "outputs": [],
119 | "source": [
120 | "strike = 590\n",
121 | "filtered_options = options_df[options_df['strike'] == strike]"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": null,
127 | "id": "d303caa8",
128 | "metadata": {
129 | "lines_to_next_cell": 1
130 | },
131 | "outputs": [],
132 | "source": [
133 | "def clean(df):\n",
134 | " cols = [\"daysToExpiration\", \"impliedVolatility\"]\n",
135 | " return (\n",
136 | " df[cols]\n",
137 | " .dropna(subset=[\"impliedVolatility\"])\n",
138 | " .drop_duplicates(\"daysToExpiration\")\n",
139 | " .sort_values(\"daysToExpiration\")\n",
140 | " .set_index(\"daysToExpiration\")\n",
141 | " )"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "id": "2c68a7af",
148 | "metadata": {
149 | "lines_to_next_cell": 1
150 | },
151 | "outputs": [],
152 | "source": [
153 | "def build_iv_grid(df):\n",
154 | " df_iv = clean(df)\n",
155 | " df_iv = df_iv.where(df_iv >= 0.01, np.nan)\n",
156 | " df_iv = df_iv[df_iv.index > 30]\n",
157 | " expiry_grid = np.arange(df_iv.index.min(), df_iv.index.max() + 1, 1.0)\n",
158 | " return (\n",
159 | " df_iv.reindex(expiry_grid)\n",
160 | " .interpolate(method=\"linear\")\n",
161 | " .rename_axis(\"daysToExpiration\")\n",
162 | " ).dropna()"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "id": "38f8c1d7",
169 | "metadata": {},
170 | "outputs": [],
171 | "source": [
172 | "call_structure = build_iv_grid(options_df)"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "id": "3e873efe",
178 | "metadata": {},
179 | "source": [
180 | "We filter our options to focus on a specific strike price. The clean function keeps only the days to expiration and implied volatility columns, removing any duplicates or missing data. The build_iv_grid function further processes this data, creating a grid of implied volatilities across different expiration dates."
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "id": "78565871",
186 | "metadata": {},
187 | "source": [
188 | "### Visualize the implied volatility term structure"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "id": "82bb0cb1",
194 | "metadata": {},
195 | "source": [
196 | "Finally, we'll create a plot to visualize the implied volatility term structure."
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": null,
202 | "id": "3c422ebd",
203 | "metadata": {},
204 | "outputs": [],
205 | "source": [
206 | "plt.plot(call_structure.index, call_structure.impliedVolatility)\n",
207 | "plt.xlabel('Days to Expiration')\n",
208 | "plt.ylabel('Implied Volatility')\n",
209 | "plt.title(f'Implied Volatility Term Structure for {ticker} (Strike: {strike})')\n",
210 | "plt.grid(True)\n",
211 | "plt.show()"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "id": "6d5587d3",
217 | "metadata": {},
218 | "source": [
219 | "We use matplotlib to create a line plot of our implied volatility data. The x-axis shows the days to expiration, while the y-axis shows the implied volatility. We add labels to our axes and a title that includes our stock ticker and strike price. The grid helps make the chart easier to read."
220 | ]
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "id": "8613b191",
225 | "metadata": {},
226 | "source": [
227 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
228 | ]
229 | }
230 | ],
231 | "metadata": {
232 | "jupytext": {
233 | "cell_metadata_filter": "-all",
234 | "main_language": "python",
235 | "notebook_metadata_filter": "-all"
236 | }
237 | },
238 | "nbformat": 4,
239 | "nbformat_minor": 5
240 | }
241 |
--------------------------------------------------------------------------------
/16_OmegaRatio.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "79ea368d",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "58225e34",
14 | "metadata": {},
15 | "source": [
16 | "This code calculates the Omega ratio for financial returns, a performance metric that captures more information about the distribution of returns than traditional metrics like the Sharpe ratio. It uses stock price data from Yahoo Finance to compute daily returns for a specific stock (AAPL). The Omega ratio is calculated by dividing the sum of positive returns above a threshold by the absolute sum of negative returns below that threshold. This metric is useful for assessing the risk and return profile of investments, especially those with non-normal return distributions. The code also visualizes the rolling Omega ratio and basic statistical properties of the returns."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "3a954243",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import yfinance as yf\n",
27 | "import numpy as np"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "id": "a375f9fa",
33 | "metadata": {},
34 | "source": [
35 | "Download the stock data for AAPL from Yahoo Finance for the specified date range"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "id": "e3430b66",
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "data = yf.download(\"AAPL\", start=\"2020-01-01\", end=\"2021-12-31\")"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "id": "294330b3",
51 | "metadata": {},
52 | "source": [
53 | "Calculate daily returns from the adjusted closing prices"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "4133cec7",
60 | "metadata": {
61 | "lines_to_next_cell": 1
62 | },
63 | "outputs": [],
64 | "source": [
65 | "returns = data[\"Adj Close\"].pct_change()"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "id": "68e174db",
71 | "metadata": {},
72 | "source": [
73 | "Calculate the Omega ratio of a strategy's returns"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "aa288ad4",
80 | "metadata": {
81 | "lines_to_next_cell": 1
82 | },
83 | "outputs": [],
84 | "source": [
85 | "def omega_ratio(returns, required_return=0.0):\n",
86 | " \"\"\"Determines the Omega ratio of a strategy.\n",
87 | " \n",
88 | " Parameters\n",
89 | " ----------\n",
90 | " returns : pd.Series or np.ndarray\n",
91 | " Daily returns of the strategy, noncumulative.\n",
92 | " required_return : float, optional\n",
93 | " Minimum acceptance return of the investor. Threshold over which to\n",
94 | " consider positive vs negative returns. It will be converted to a\n",
95 | " value appropriate for the period of the returns. E.g. An annual minimum\n",
96 | " acceptable return of 100 will translate to a minimum acceptable\n",
97 | " return of 0.018.\n",
98 | "\n",
99 | " Returns\n",
100 | " -------\n",
101 | " omega_ratio : float\n",
102 | "\n",
103 | " Note\n",
104 | " -----\n",
105 | " See https://en.wikipedia.org/wiki/Omega_ratio for more details.\n",
106 | " \"\"\"\n",
107 | "\n",
108 | " # Convert the required return to a daily return threshold\n",
109 | "\n",
110 | " return_threshold = (1 + required_return) ** (1 / 252) - 1\n",
111 | "\n",
112 | " # Calculate the difference between returns and the return threshold\n",
113 | "\n",
114 | " returns_less_thresh = returns - return_threshold\n",
115 | "\n",
116 | " # Calculate the numerator as the sum of positive returns above the threshold\n",
117 | "\n",
118 | " numer = sum(returns_less_thresh[returns_less_thresh > 0.0])\n",
119 | "\n",
120 | " # Calculate the denominator as the absolute sum of negative returns below the threshold\n",
121 | "\n",
122 | " denom = -1.0 * sum(returns_less_thresh[returns_less_thresh < 0.0])\n",
123 | "\n",
124 | " # Return the Omega ratio if the denominator is positive; otherwise, return NaN\n",
125 | "\n",
126 | " if denom > 0.0:\n",
127 | " return numer / denom\n",
128 | " else:\n",
129 | " return np.nan"
130 | ]
131 | },
132 | {
133 | "cell_type": "markdown",
134 | "id": "cc63495d",
135 | "metadata": {},
136 | "source": [
137 | "Calculate the Omega ratio for the given returns and required return"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": null,
143 | "id": "da67b3ba",
144 | "metadata": {},
145 | "outputs": [],
146 | "source": [
147 | "omega_ratio(returns, 0.07)"
148 | ]
149 | },
150 | {
151 | "cell_type": "markdown",
152 | "id": "9e89e93b",
153 | "metadata": {},
154 | "source": [
155 | "Compute and plot the rolling 30-day Omega ratio of the returns"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "id": "ba112f43",
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "returns.rolling(30).apply(omega_ratio).plot()"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "id": "33e329c8",
171 | "metadata": {},
172 | "source": [
173 | "Plot a histogram of the daily returns to visualize their distribution"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "id": "1bad9e37",
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "returns.hist(bins=50)"
184 | ]
185 | },
186 | {
187 | "cell_type": "markdown",
188 | "id": "cc2176f5",
189 | "metadata": {},
190 | "source": [
191 | "Calculate and display the skewness of the returns distribution"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": null,
197 | "id": "85b6eba5",
198 | "metadata": {},
199 | "outputs": [],
200 | "source": [
201 | "returns.skew()"
202 | ]
203 | },
204 | {
205 | "cell_type": "markdown",
206 | "id": "2cd3f659",
207 | "metadata": {},
208 | "source": [
209 | "Calculate and display the kurtosis of the returns distribution"
210 | ]
211 | },
212 | {
213 | "cell_type": "code",
214 | "execution_count": null,
215 | "id": "025b51fc",
216 | "metadata": {},
217 | "outputs": [],
218 | "source": [
219 | "returns.kurtosis()"
220 | ]
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "id": "9f4bae70",
225 | "metadata": {},
226 | "source": [
227 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
228 | ]
229 | }
230 | ],
231 | "metadata": {
232 | "jupytext": {
233 | "cell_metadata_filter": "-all",
234 | "main_language": "python",
235 | "notebook_metadata_filter": "-all"
236 | }
237 | },
238 | "nbformat": 4,
239 | "nbformat_minor": 5
240 | }
241 |
--------------------------------------------------------------------------------
/25_KalmanFilter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "120db895",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "98c6ae8b",
14 | "metadata": {},
15 | "source": [
16 | "This code uses the Kalman Filter to smooth financial time series data and compare it to a simple moving average. It loads historical stock prices for a specified date range and computes the Kalman Filter estimate of the average price. It then plots the Kalman Filter estimate alongside the actual prices and a 30-day moving average. This is useful for financial analysis and modeling to reduce noise and detect underlying trends in stock prices."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "2d864e60",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import pandas as pd\n",
27 | "import matplotlib.pyplot as plt"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "id": "d7c52f92",
34 | "metadata": {},
35 | "outputs": [],
36 | "source": [
37 | "from pykalman import KalmanFilter\n",
38 | "from openbb_terminal.sdk import openbb"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "id": "5fa2307c",
44 | "metadata": {},
45 | "source": [
46 | "Load historical stock prices for LMT between specified start and end dates"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "id": "216b4c1a",
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "data = openbb.stocks.load(\"LMT\", start_date=\"2013-01-01\", end_date=\"2015-01-01\")\n",
57 | "prices = data[\"Adj Close\"]"
58 | ]
59 | },
60 | {
61 | "cell_type": "markdown",
62 | "id": "f8486b4c",
63 | "metadata": {},
64 | "source": [
65 | "Initialize the Kalman Filter with specified parameters"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "id": "a262938f",
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "kf = KalmanFilter(\n",
76 | " transition_matrices = [1],\n",
77 | " observation_matrices = [1],\n",
78 | " initial_state_mean = 0,\n",
79 | " initial_state_covariance = 1,\n",
80 | " observation_covariance=1,\n",
81 | " transition_covariance=0.01\n",
82 | ")"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "id": "84830040",
88 | "metadata": {},
89 | "source": [
90 | "Apply Kalman Filter to the price data to estimate the average. The filter smooths the data to reduce noise and detect trends. "
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "id": "733d4019",
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "state_means, _ = kf.filter(prices.values)\n",
101 | "state_means = pd.Series(state_means.flatten(), index=prices.index)"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "id": "b3bc4dde",
107 | "metadata": {},
108 | "source": [
109 | "Compute a 30-day rolling mean of the price data for comparison"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "id": "b96677e2",
116 | "metadata": {},
117 | "outputs": [],
118 | "source": [
119 | "mean30 = prices.rolling(window=30).mean()"
120 | ]
121 | },
122 | {
123 | "cell_type": "markdown",
124 | "id": "7cf2307d",
125 | "metadata": {},
126 | "source": [
127 | "Plot the Kalman Filter estimate, actual prices, and 30-day moving average to visualize data smoothing and trend detection"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "id": "42c88d04",
134 | "metadata": {},
135 | "outputs": [],
136 | "source": [
137 | "plt.plot(state_means)\n",
138 | "plt.plot(prices)\n",
139 | "plt.plot(mean30)\n",
140 | "plt.title('Kalman filter estimate of average')\n",
141 | "plt.legend(['Kalman', 'Price', '30-day MA'])\n",
142 | "plt.xlabel('Day')\n",
143 | "plt.ylabel('Price')"
144 | ]
145 | },
146 | {
147 | "cell_type": "markdown",
148 | "id": "726482c3",
149 | "metadata": {},
150 | "source": [
151 | "Plot the last 200 days of Kalman Filter estimate, actual prices, and 30-day moving average for a more detailed view"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": null,
157 | "id": "71660a32",
158 | "metadata": {},
159 | "outputs": [],
160 | "source": [
161 | "plt.plot(state_means[-200:])\n",
162 | "plt.plot(prices[-200:])\n",
163 | "plt.plot(mean30[-200:])\n",
164 | "plt.title('Kalman filter estimate of average')\n",
165 | "plt.legend(['Kalman', 'Price', '30-day MA'])\n",
166 | "plt.xlabel('Day')\n",
167 | "plt.ylabel('Price')"
168 | ]
169 | },
170 | {
171 | "cell_type": "markdown",
172 | "id": "fa717712",
173 | "metadata": {},
174 | "source": [
175 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
176 | ]
177 | }
178 | ],
179 | "metadata": {
180 | "jupytext": {
181 | "cell_metadata_filter": "-all",
182 | "main_language": "python",
183 | "notebook_metadata_filter": "-all"
184 | }
185 | },
186 | "nbformat": 4,
187 | "nbformat_minor": 5
188 | }
189 |
--------------------------------------------------------------------------------
/26_HurstExponent.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "cf1415df",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "b9fd5ee6",
14 | "metadata": {},
15 | "source": [
16 | "This notebook analyzes the Hurst exponent of the S&P 500 index to measure market trends and randomness. It loads historical S&P 500 data using the OpenBB SDK and calculates the Hurst exponent for various time lags. The Hurst exponent helps in understanding the nature of time series, whether it is mean-reverting, trending, or a random walk. This information is valuable for financial analysts and quant traders for making informed decisions. Additionally, it plots rolling volatility to observe changes in market volatility over time."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "d89701d0",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import pandas as pd\n",
27 | "import numpy as np"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "id": "c475ecc1",
34 | "metadata": {
35 | "lines_to_next_cell": 2
36 | },
37 | "outputs": [],
38 | "source": [
39 | "from openbb_terminal.sdk import openbb"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "id": "883862a7",
45 | "metadata": {},
46 | "source": [
47 | "Load historical S&P 500 data from 2000 to 2019 using the OpenBB SDK and select the adjusted close prices"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "id": "00be2ba2",
54 | "metadata": {},
55 | "outputs": [],
56 | "source": [
57 | "df = openbb.stocks.load(\"^GSPC\", start_date=\"2000-01-01\", end_date=\"2019-12-31\")[\"Adj Close\"]"
58 | ]
59 | },
60 | {
61 | "cell_type": "markdown",
62 | "id": "8dd5f150",
63 | "metadata": {},
64 | "source": [
65 | "Plot the S&P 500 adjusted close prices to visualize the historical data"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "id": "ab7ba936",
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "df.plot(title=\"S&P 500\")"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "id": "9a9e6e7d",
82 | "metadata": {},
83 | "outputs": [],
84 | "source": [
85 | "def get_hurst_exponent(ts, max_lag=20):\n",
86 | " \"\"\"Calculate the Hurst exponent of a time series\n",
87 | " \n",
88 | " Parameters\n",
89 | " ----------\n",
90 | " ts : np.ndarray\n",
91 | " Time series data\n",
92 | " max_lag : int, optional\n",
93 | " Maximum lag to consider, by default 20\n",
94 | " \n",
95 | " Returns\n",
96 | " -------\n",
97 | " float\n",
98 | " Estimated Hurst exponent\n",
99 | " \n",
100 | " Notes\n",
101 | " -----\n",
102 | " The Hurst exponent is used to determine the \n",
103 | " long-term memory of time series data.\n",
104 | " \"\"\"\n",
105 | "\n",
106 | " # Define the range of lags to be used in the calculation\n",
107 | " lags = range(2, max_lag)\n",
108 | "\n",
109 | " # Calculate the standard deviation of differences for each lag\n",
110 | " tau = [np.std(np.subtract(ts[lag:], ts[:-lag])) for lag in lags]\n",
111 | "\n",
112 | " # Perform a linear fit to estimate the Hurst exponent\n",
113 | " return np.polyfit(np.log(lags), np.log(tau), 1)[0]"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "id": "fc132efc",
119 | "metadata": {},
120 | "source": [
121 | "Calculate and print the Hurst exponent for various lags using the full dataset"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": null,
127 | "id": "7a1872d6",
128 | "metadata": {
129 | "lines_to_next_cell": 2
130 | },
131 | "outputs": [],
132 | "source": [
133 | "for lag in [20, 100, 250, 500, 1000]:\n",
134 | " hurst_exp = get_hurst_exponent(df.values, lag)\n",
135 | " print(f\"{lag} lags: {hurst_exp:.4f}\")"
136 | ]
137 | },
138 | {
139 | "cell_type": "markdown",
140 | "id": "d51dc23f",
141 | "metadata": {},
142 | "source": [
143 | "Select a shorter series from 2005 to 2007 and calculate the Hurst exponent for various lags"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": null,
149 | "id": "3cd6e2df",
150 | "metadata": {
151 | "lines_to_next_cell": 2
152 | },
153 | "outputs": [],
154 | "source": [
155 | "shorter_series = df.loc[\"2005\":\"2007\"].values\n",
156 | "for lag in [20, 100, 250, 500]:\n",
157 | " hurst_exp = get_hurst_exponent(shorter_series, lag)\n",
158 | " print(f\"{lag} lags: {hurst_exp:.4f}\")"
159 | ]
160 | },
161 | {
162 | "cell_type": "markdown",
163 | "id": "0a623fcf",
164 | "metadata": {},
165 | "source": [
166 | "Calculate rolling volatility using a 30-day window and plot the results to observe changes over time"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": null,
172 | "id": "100403dc",
173 | "metadata": {
174 | "lines_to_next_cell": 2
175 | },
176 | "outputs": [],
177 | "source": [
178 | "rv = df.rolling(30).apply(np.std)\n",
179 | "rv.plot()"
180 | ]
181 | },
182 | {
183 | "cell_type": "markdown",
184 | "id": "a021555b",
185 | "metadata": {},
186 | "source": [
187 | "Calculate and print the Hurst exponent for various lags using the rolling volatility data"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "id": "2175669f",
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "for lag in [20, 100, 250, 500, 1000]:\n",
198 | " hurst_exp = get_hurst_exponent(rv.dropna().values, lag)\n",
199 | " print(f\"{lag} lags: {hurst_exp:.4f}\")"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "id": "dfb39650",
205 | "metadata": {},
206 | "source": [
207 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
208 | ]
209 | }
210 | ],
211 | "metadata": {
212 | "jupytext": {
213 | "cell_metadata_filter": "-all",
214 | "main_language": "python",
215 | "notebook_metadata_filter": "-all"
216 | }
217 | },
218 | "nbformat": 4,
219 | "nbformat_minor": 5
220 | }
221 |
--------------------------------------------------------------------------------
/27_Decomposition.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "f265ef79",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "d7770571",
14 | "metadata": {},
15 | "source": [
16 | "This code retrieves and analyzes unemployment data, focusing on trends and seasonality. It uses the OpenBB SDK to fetch unemployment data from 2010 to 2019. The data is then processed to calculate rolling statistics and visualized. Seasonal decomposition and STL decomposition are applied to understand the seasonal and trend components. Additionally, the Hodrick-Prescott filter is used to separate the cyclical and trend components of the data."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "60656fb6",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import pandas as pd"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "1436ce1d",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "from statsmodels.tsa.seasonal import seasonal_decompose, STL\n",
37 | "from statsmodels.tsa.filters.hp_filter import hpfilter\n",
38 | "from openbb_terminal.sdk import openbb"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "id": "566d605d",
44 | "metadata": {},
45 | "source": [
46 | "Retrieve unemployment data from OpenBB SDK for the period starting 2010"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "id": "df30c6a4",
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "df = openbb.economy.unemp(2010)"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "id": "e7a42626",
62 | "metadata": {},
63 | "source": [
64 | "Set the index to the 'date' column, filter up to 2019-12-31, and sort by date"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "id": "cbcf68cf",
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "df = df.set_index(\"date\")[:\"2019-12-31\"].sort_index()"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "id": "9b4b91cd",
80 | "metadata": {},
81 | "source": [
82 | "Calculate rolling mean and standard deviation with a 12-month window"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "id": "e28f9215",
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "df[\"rolling_mean\"] = df[\"unemp\"].rolling(window=12).mean()\n",
93 | "df[\"rolling_std\"] = df[\"unemp\"].rolling(window=12).std()"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "id": "e5863458",
99 | "metadata": {},
100 | "source": [
101 | "Plot the unemployment rate with rolling mean and standard deviation"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "id": "c10e8025",
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "df.plot(title=\"Unemployment rate\")"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "id": "17630ce4",
117 | "metadata": {},
118 | "source": [
119 | "Perform seasonal decomposition of the unemployment data using an additive model and plot results"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": null,
125 | "id": "52f48f1d",
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "decomposition_results = seasonal_decompose(\n",
130 | " df[\"unemp\"], \n",
131 | " model=\"additive\"\n",
132 | ").plot()"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "47f1d516",
138 | "metadata": {},
139 | "source": [
140 | "Apply STL decomposition to the unemployment data and plot the results"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "id": "a75ed137",
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "stl_decomposition = STL(df[[\"unemp\"]]).fit()\n",
151 | "stl_decomposition.plot().suptitle(\"STL Decomposition\");"
152 | ]
153 | },
154 | {
155 | "cell_type": "markdown",
156 | "id": "b61552be",
157 | "metadata": {},
158 | "source": [
159 | "Apply Hodrick-Prescott filter to decompose the unemployment data into cycle and trend components and plot results"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "id": "36c7a50e",
166 | "metadata": {
167 | "lines_to_next_cell": 2
168 | },
169 | "outputs": [],
170 | "source": [
171 | "hp_df = df[[\"unemp\"]].copy()\n",
172 | "hp_df[\"cycle\"], hp_df[\"trend\"] = hpfilter(hp_df[\"unemp\"], 129600)\n",
173 | "hp_df.plot(subplots=True, title=\"Hodrick-Prescott filter\");"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "id": "c508c532",
179 | "metadata": {},
180 | "source": [
181 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
182 | ]
183 | }
184 | ],
185 | "metadata": {
186 | "jupytext": {
187 | "cell_metadata_filter": "-all",
188 | "main_language": "python",
189 | "notebook_metadata_filter": "-all"
190 | }
191 | },
192 | "nbformat": 4,
193 | "nbformat_minor": 5
194 | }
195 |
--------------------------------------------------------------------------------
/31_BarrierOptionPricing.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "fa749e17",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "58737b4a",
14 | "metadata": {},
15 | "source": [
16 | "This code simulates stock returns using Geometric Brownian Motion (GBM) and calculates the premium for a barrier option. It defines a function to simulate GBM paths based on initial stock price, drift, volatility, and other parameters. The code then generates multiple simulated price paths, checks if the maximum value of each path exceeds a barrier, and calculates the option payoff accordingly. Lastly, it discounts the payoffs to present value and computes the average premium. This is useful for pricing exotic financial derivatives and analyzing risk."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "ecc8c484",
23 | "metadata": {
24 | "lines_to_next_cell": 1
25 | },
26 | "outputs": [],
27 | "source": [
28 | "import numpy as np\n",
29 | "import matplotlib.pyplot as plt"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "id": "748351bc",
36 | "metadata": {
37 | "lines_to_next_cell": 1
38 | },
39 | "outputs": [],
40 | "source": [
41 | "def simulate_gbm(s_0, mu, sigma, T, N, n_sims=10**3, random_seed=42):\n",
42 | " \"\"\"\n",
43 | " Function used for simulating stock returns using Geometric Brownian Motion.\n",
44 | "\n",
45 | " Parameters\n",
46 | " ------------\n",
47 | " s_0 : float\n",
48 | " Initial stock price\n",
49 | " mu : float\n",
50 | " Drift coefficient\n",
51 | " sigma : float\n",
52 | " Diffusion coefficient\n",
53 | " T : float\n",
54 | " Length of the forecast horizon, same unit as dt\n",
55 | " N : int\n",
56 | " Number of time increments in the forecast horizon\n",
57 | " n_sims : int\n",
58 | " Number of simulation paths\n",
59 | " random_seed : int\n",
60 | " Random seed for reproducibility\n",
61 | "\n",
62 | " Returns\n",
63 | " -----------\n",
64 | " S_t : np.ndarray\n",
65 | " Matrix (size: n_sims x (T+1)) containing the simulation results.\n",
66 | " Rows represent sample paths, while columns point of time.\n",
67 | " \"\"\"\n",
68 | "\n",
69 | " # Set random seed for reproducibility\n",
70 | " np.random.seed(random_seed)\n",
71 | "\n",
72 | " # Calculate time increment\n",
73 | " dt = T / N\n",
74 | "\n",
75 | " # Generate normally distributed random values for the Wiener process\n",
76 | " dW = np.random.normal(scale=np.sqrt(dt), size=(n_sims, N + 1))\n",
77 | "\n",
78 | " # Simulate the evolution of the process using GBM formula\n",
79 | " S_t = s_0 * np.exp(np.cumsum((mu - 0.5 * sigma**2) * dt + sigma * dW, axis=1))\n",
80 | " S_t[:, 0] = s_0\n",
81 | "\n",
82 | " return S_t"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "id": "e67fab55",
88 | "metadata": {},
89 | "source": [
90 | "Define initial stock price, drift, volatility, time horizon, and number of increments"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "id": "96268bc8",
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "S_0 = 55\n",
101 | "r = 0.06\n",
102 | "sigma = 0.2\n",
103 | "T = 1\n",
104 | "N = 252"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "id": "be23773e",
110 | "metadata": {},
111 | "source": [
112 | "Define barrier level and strike price for the option"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": null,
118 | "id": "30090621",
119 | "metadata": {},
120 | "outputs": [],
121 | "source": [
122 | "BARRIER = 65\n",
123 | "K = 60"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "id": "126163f8",
129 | "metadata": {},
130 | "source": [
131 | "Generate GBM simulations for the given parameters"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "id": "227001bf",
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "gbm_sims = simulate_gbm(s_0=S_0, mu=r, sigma=sigma, T=T, N=N)"
142 | ]
143 | },
144 | {
145 | "cell_type": "markdown",
146 | "id": "6c5abfb3",
147 | "metadata": {},
148 | "source": [
149 | "Plot the simulated price paths and the barrier level"
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "id": "b718dafd",
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "plt.axhline(y=BARRIER, color='r', linestyle='-')\n",
160 | "plt.xlim(0, N)\n",
161 | "plt.plot(gbm_sims.T, linewidth=0.25)"
162 | ]
163 | },
164 | {
165 | "cell_type": "markdown",
166 | "id": "58aed041",
167 | "metadata": {},
168 | "source": [
169 | "Calculate the maximum value per path to determine if the barrier was breached"
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "execution_count": null,
175 | "id": "7fc85776",
176 | "metadata": {},
177 | "outputs": [],
178 | "source": [
179 | "max_value_per_path = np.max(gbm_sims, axis=1)"
180 | ]
181 | },
182 | {
183 | "cell_type": "markdown",
184 | "id": "589b129d",
185 | "metadata": {},
186 | "source": [
187 | "Calculate the payoff of the barrier option based on the barrier breach condition"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "id": "685b4103",
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "payoff = np.where(\n",
198 | " max_value_per_path > BARRIER, \n",
199 | " np.maximum(0, gbm_sims[:, -1] - K), \n",
200 | " 0\n",
201 | ")"
202 | ]
203 | },
204 | {
205 | "cell_type": "markdown",
206 | "id": "74527d4a",
207 | "metadata": {},
208 | "source": [
209 | "Calculate the discount factor and the average premium for the option"
210 | ]
211 | },
212 | {
213 | "cell_type": "code",
214 | "execution_count": null,
215 | "id": "0c654f62",
216 | "metadata": {},
217 | "outputs": [],
218 | "source": [
219 | "discount_factor = np.exp(-r * T)\n",
220 | "premium = discount_factor * np.mean(payoff)\n",
221 | "premium"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "id": "51efd6b6",
227 | "metadata": {},
228 | "source": [
229 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
230 | ]
231 | }
232 | ],
233 | "metadata": {
234 | "jupytext": {
235 | "cell_metadata_filter": "-all",
236 | "main_language": "python",
237 | "notebook_metadata_filter": "-all"
238 | }
239 | },
240 | "nbformat": 4,
241 | "nbformat_minor": 5
242 | }
243 |
--------------------------------------------------------------------------------
/36_TreynorRatio.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "0c0b824d",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "14722e46",
14 | "metadata": {},
15 | "source": [
16 | "This notebook calculates the Treynor ratio for a given stock and its benchmark index over time. It fetches historical price data for the specified stock and benchmark, computes daily returns, and calculates rolling beta values. The Treynor ratio is then derived using these beta values. This ratio helps in assessing the risk-adjusted performance of the asset relative to the benchmark, aiding in investment decision-making."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "64fbd380",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "from openbb_terminal.sdk import openbb\n",
27 | "from openbb_terminal.sdk import TerminalStyle"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "id": "f263a055",
34 | "metadata": {},
35 | "outputs": [],
36 | "source": [
37 | "theme = TerminalStyle(\"light\", \"light\", \"light\")"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "id": "6bd464bd",
43 | "metadata": {},
44 | "source": [
45 | "Load historical stock data for JPM and SPY over the specified date range"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "id": "4adcbef5",
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "data = openbb.stocks.load(\n",
56 | " \"JPM, SPY\",\n",
57 | " start_date=\"2014-01-01\",\n",
58 | " end_date=\"2022-12-31\"\n",
59 | ")"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "id": "be35b95f",
65 | "metadata": {},
66 | "source": [
67 | "Extract adjusted closing prices for JPM and SPY from the loaded data"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "id": "84cbd927",
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "asset = data[\"Adj Close\"].JPM\n",
78 | "benchmark = data[\"Adj Close\"].SPY"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "id": "7325eefa",
84 | "metadata": {},
85 | "source": [
86 | "Calculate daily returns for JPM and SPY, dropping any missing values"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "id": "201dc273",
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "asset_returns = asset.pct_change().dropna()\n",
97 | "benchmark_returns = benchmark.pct_change().dropna()"
98 | ]
99 | },
100 | {
101 | "cell_type": "markdown",
102 | "id": "c5b1530c",
103 | "metadata": {},
104 | "source": [
105 | "Calculate the rolling variance of benchmark returns over a 30-day window"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "id": "eb1cbc10",
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "bm_var = benchmark_returns.rolling(\n",
116 | " window=30\n",
117 | ").var()"
118 | ]
119 | },
120 | {
121 | "cell_type": "markdown",
122 | "id": "af91c7ce",
123 | "metadata": {},
124 | "source": [
125 | "Calculate the rolling covariance between asset returns and benchmark returns over a 30-day window"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "id": "ebcee8bb",
132 | "metadata": {},
133 | "outputs": [],
134 | "source": [
135 | "bm_cov = benchmark_returns.rolling(\n",
136 | " window=30\n",
137 | ").cov(asset_returns)"
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "id": "44a33731",
143 | "metadata": {},
144 | "source": [
145 | "Compute the rolling beta values by dividing the rolling covariance by the rolling variance"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": null,
151 | "id": "0d100612",
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "beta = bm_cov / bm_var"
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "id": "fb8f6eee",
161 | "metadata": {},
162 | "source": [
163 | "Calculate the Treynor ratio by adjusting the returns of the asset against its beta"
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": null,
169 | "id": "40d560cd",
170 | "metadata": {},
171 | "outputs": [],
172 | "source": [
173 | "treynor = (\n",
174 | " asset_returns - benchmark_returns\n",
175 | ") / beta"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "id": "7e050c57",
181 | "metadata": {},
182 | "source": [
183 | "Display the calculated Treynor ratio"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "id": "a927dab4",
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "treynor"
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "id": "cfc7731a",
199 | "metadata": {},
200 | "source": [
201 | "Plot the Treynor ratio over time to visualize its trend"
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": null,
207 | "id": "863ccecf",
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "treynor.plot()"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "id": "6f0482e8",
217 | "metadata": {},
218 | "source": [
219 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
220 | ]
221 | }
222 | ],
223 | "metadata": {
224 | "jupytext": {
225 | "cell_metadata_filter": "-all",
226 | "main_language": "python",
227 | "notebook_metadata_filter": "-all"
228 | }
229 | },
230 | "nbformat": 4,
231 | "nbformat_minor": 5
232 | }
233 |
--------------------------------------------------------------------------------
/38_AmericanOptionPricer.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "9d21a51c",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "658dd5ee",
14 | "metadata": {},
15 | "source": [
16 | "This code implements a binomial model to price American options, considering early exercise features. Using parameters like spot price, strike price, risk-free rate, volatility, time to expiry, and the number of steps, the model constructs a price tree. It then calculates the option value at each node, considering both holding and exercising the option. This approach is practical for valuing American-style options, which can be exercised at any time before expiration."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "8791fb6f",
23 | "metadata": {
24 | "lines_to_next_cell": 1
25 | },
26 | "outputs": [],
27 | "source": [
28 | "import numpy as np"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "id": "9af5deeb",
34 | "metadata": {},
35 | "source": [
36 | "Define a function to price American options using the binomial tree model"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "id": "f00b850d",
43 | "metadata": {
44 | "lines_to_next_cell": 1
45 | },
46 | "outputs": [],
47 | "source": [
48 | "def american_option_pricer(spot, strike, rate, vol, expiry, steps, option_type):\n",
49 | " \"\"\"Price an American option using binomial model\n",
50 | " \n",
51 | " Parameters\n",
52 | " ----------\n",
53 | " spot : float\n",
54 | " Current spot price of the underlying asset\n",
55 | " strike : float\n",
56 | " Strike price of the option\n",
57 | " rate : float\n",
58 | " Risk-free interest rate\n",
59 | " vol : float\n",
60 | " Volatility of the underlying asset\n",
61 | " expiry : float\n",
62 | " Time to expiry in years\n",
63 | " steps : int\n",
64 | " Number of steps in the binomial tree\n",
65 | " option_type : str\n",
66 | " Type of the option ('call' or 'put')\n",
67 | " \n",
68 | " Returns\n",
69 | " -------\n",
70 | " float\n",
71 | " Estimated price of the American option\n",
72 | " \n",
73 | " Notes\n",
74 | " -----\n",
75 | " This function constructs a binomial tree to \n",
76 | " estimate the price of an American option. It \n",
77 | " accounts for early exercise features by comparing \n",
78 | " the value of holding versus exercising the option \n",
79 | " at each node.\n",
80 | " \"\"\"\n",
81 | "\n",
82 | " # Calculate the time interval and the up and down factors\n",
83 | "\n",
84 | " dt = expiry / steps\n",
85 | " u = np.exp(vol * np.sqrt(dt))\n",
86 | " d = 1 / u\n",
87 | "\n",
88 | " # Calculate the risk-neutral probability\n",
89 | "\n",
90 | " p = (np.exp(rate * dt) - d) / (u - d)\n",
91 | "\n",
92 | " # Create the binomial price tree\n",
93 | "\n",
94 | " price_tree = np.zeros((steps + 1, steps + 1))\n",
95 | " for i in range(steps + 1):\n",
96 | " price_tree[i, -1] = spot * (u ** (steps - i)) * (d**i)\n",
97 | "\n",
98 | " # Calculate the option value at each node\n",
99 | "\n",
100 | " option_tree = np.zeros_like(price_tree)\n",
101 | " if option_type.lower() == \"call\":\n",
102 | " option_tree[:, -1] = np.maximum(price_tree[:, -1] - strike, 0)\n",
103 | " elif option_type.lower() == \"put\":\n",
104 | " option_tree[:, -1] = np.maximum(strike - price_tree[:, -1], 0)\n",
105 | " else:\n",
106 | " raise ValueError(\"Option type must be either 'call' or 'put'.\")\n",
107 | "\n",
108 | " # Traverse the tree backward to find the option price today\n",
109 | "\n",
110 | " for t in range(steps - 1, -1, -1):\n",
111 | " for i in range(t + 1):\n",
112 | " exercise = 0\n",
113 | " if option_type.lower() == \"call\":\n",
114 | " exercise = price_tree[i, t] - strike\n",
115 | " elif option_type.lower() == \"put\":\n",
116 | " exercise = strike - price_tree[i, t]\n",
117 | "\n",
118 | " hold = np.exp(-rate * dt) * (\n",
119 | " p * option_tree[i, t + 1] + (1 - p) * option_tree[i + 1, t + 1]\n",
120 | " )\n",
121 | " option_tree[i, t] = np.maximum(exercise, hold)\n",
122 | "\n",
123 | " return option_tree[0, 0]"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "id": "cba2ebed",
129 | "metadata": {},
130 | "source": [
131 | "Estimate the price of an American Call option using the defined binomial model parameters"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "id": "e615d29c",
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "option_price = american_option_pricer(\n",
142 | " spot=55.0,\n",
143 | " strike=50.0,\n",
144 | " rate=0.05,\n",
145 | " vol=0.3,\n",
146 | " expiry=1.0,\n",
147 | " steps=100,\n",
148 | " option_type=\"Call\",\n",
149 | ")"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "id": "a87537d8",
155 | "metadata": {},
156 | "source": [
157 | "Print the estimated price of the American Call option"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "id": "5118d3b2",
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "print(\n",
168 | " f\"The estimated price of the American Call option is: {option_price:.2f}\"\n",
169 | ")"
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "id": "16e10849",
175 | "metadata": {},
176 | "source": [
177 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
178 | ]
179 | }
180 | ],
181 | "metadata": {
182 | "jupytext": {
183 | "cell_metadata_filter": "-all",
184 | "main_language": "python",
185 | "notebook_metadata_filter": "-all"
186 | }
187 | },
188 | "nbformat": 4,
189 | "nbformat_minor": 5
190 | }
191 |
--------------------------------------------------------------------------------
/41_ThetaDataStreamingStraddle.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "eabb2e65",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "44c499db",
14 | "metadata": {},
15 | "source": [
16 | "This code streams real-time option quotes using the ThetaData API to calculate and print straddle prices for a specific option contract. It defines a callback function to handle incoming quote messages and update the bid, ask, and mid prices of the straddle. The main function initializes the ThetaClient, connects to the stream, and requests quote updates for both call and put options. This setup is useful for traders and analysts monitoring option prices and straddle strategies in real-time."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "db51b442",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import datetime as dt"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "54c5fb62",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "import thetadata.client\n",
37 | "from thetadata import (\n",
38 | " Quote,\n",
39 | " StreamMsg,\n",
40 | " ThetaClient,\n",
41 | " OptionRight,\n",
42 | " StreamMsgType\n",
43 | ")\n",
44 | "from thetadata import StreamResponseType as ResponseType"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "id": "68ddfbd0",
50 | "metadata": {},
51 | "source": [
52 | "Initialize last call and put quotes and price variable"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "id": "14880c60",
59 | "metadata": {
60 | "lines_to_next_cell": 1
61 | },
62 | "outputs": [],
63 | "source": [
64 | "last_call_quote = Quote()\n",
65 | "last_put_quote = Quote()\n",
66 | "price = 0"
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "id": "9b319868",
72 | "metadata": {},
73 | "source": [
74 | "Callback function to handle incoming stream messages and update straddle prices"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "id": "65f8ab69",
81 | "metadata": {
82 | "lines_to_next_cell": 1
83 | },
84 | "outputs": [],
85 | "source": [
86 | "def callback_straddle(msg):\n",
87 | " \"\"\"Handles incoming stream messages to update straddle prices.\n",
88 | " \n",
89 | " Parameters\n",
90 | " ----------\n",
91 | " msg : StreamMsg\n",
92 | " Incoming stream message containing quote data.\n",
93 | " \n",
94 | " Returns\n",
95 | " -------\n",
96 | " None\n",
97 | " \"\"\"\n",
98 | "\n",
98 | "    # 'price' is module-level state; declare it global so the assignment\n",
98 | "    # at the bottom of this function updates it instead of making it a\n",
98 | "    # local variable (which would raise UnboundLocalError below).\n",
98 | "    global price\n",
98 | "\n",
99 | "    if (msg.type != StreamMsgType.QUOTE):\n",
100 | " return\n",
101 | "\n",
102 | " if msg.contract.isCall:\n",
103 | " last_call_quote.copy_from(msg.quote)\n",
104 | " else:\n",
105 | " last_put_quote.copy_from(msg.quote)\n",
106 | "\n",
107 | " straddle_bid = round(last_call_quote.bid_price + last_put_quote.bid_price, 2)\n",
108 | " straddle_ask = round(last_call_quote.ask_price + last_put_quote.ask_price, 2)\n",
109 | " straddle_mid = round((straddle_bid + straddle_ask) / 2, 2)\n",
110 | " \n",
111 | " time_stamp = thetadata.client.ms_to_time(\n",
112 | " msg.quote.ms_of_day\n",
113 | " )\n",
114 | "\n",
115 | " if price != straddle_mid:\n",
116 | " print(\n",
117 | " f\"time: {time_stamp} bid: {straddle_bid} mid: {straddle_mid} ask: {straddle_ask}\"\n",
118 | " )\n",
119 | " price = straddle_mid"
120 | ]
121 | },
122 | {
123 | "cell_type": "markdown",
124 | "id": "09157b67",
125 | "metadata": {},
126 | "source": [
127 | "Main function to set up streaming for straddle prices"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "id": "9d963b0f",
134 | "metadata": {
135 | "lines_to_next_cell": 1
136 | },
137 | "outputs": [],
138 | "source": [
139 | "def streaming_straddle():\n",
140 | " \"\"\"Streams option quotes to calculate straddle prices in real-time.\n",
141 | " \n",
142 | " Returns\n",
143 | " -------\n",
144 | " None\n",
145 | " \"\"\"\n",
146 | " \n",
147 | " client = ThetaClient(\n",
148 | "    username=\"<YOUR_THETADATA_EMAIL>\",\n",
149 | "    passwd=\"<YOUR_THETADATA_PASSWORD>\"\n",
150 | " )\n",
151 | "\n",
152 | " client.connect_stream(\n",
153 | " callback_straddle\n",
154 | " )\n",
155 | " \n",
156 | " req_id_call = client.req_quote_stream_opt(\n",
157 | " \"SPY\", dt.date(2024, 3, 28), 475, OptionRight.CALL\n",
158 | " ) # Request quote updates\n",
159 | " \n",
160 | " req_id_put = client.req_quote_stream_opt(\n",
161 | " \"SPY\", dt.date(2024, 3, 28), 475, OptionRight.PUT\n",
162 | " )\n",
163 | "\n",
164 | " if (\n",
165 | " client.verify(req_id_call) != ResponseType.SUBSCRIBED\n",
166 | " or client.verify(req_id_put) != ResponseType.SUBSCRIBED\n",
167 | " ):\n",
168 | " raise Exception(\n",
169 | " \"Unable to stream contract. A standard/PRO subscription required.\"\n",
170 | " )"
171 | ]
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "id": "f08a4bc5",
176 | "metadata": {},
177 | "source": [
178 | "Call the main function to start streaming"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": null,
184 | "id": "1bfbff7c",
185 | "metadata": {},
186 | "outputs": [],
187 | "source": [
188 | "streaming_straddle()"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "id": "74a5b687",
194 | "metadata": {},
195 | "source": [
196 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
197 | ]
198 | }
199 | ],
200 | "metadata": {
201 | "jupytext": {
202 | "cell_metadata_filter": "-all",
203 | "main_language": "python",
204 | "notebook_metadata_filter": "-all"
205 | }
206 | },
207 | "nbformat": 4,
208 | "nbformat_minor": 5
209 | }
210 |
--------------------------------------------------------------------------------
/47_LangChainPDF.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "6e9a4f41",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "fd709f5c",
14 | "metadata": {},
15 | "source": [
16 | "This code integrates LangChain library functionalities to process and query PDF documents using OpenAI's language model. It loads a PDF file, splits it into pages, and stores these pages in a vector database (ChromaDB). It then creates a toolkit to interact with the vector store and uses an agent executor to query the database based on user input. This is useful for extracting and querying information from structured documents like financial reports."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "58bade97",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import os"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "ce47c00a",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "from langchain.llms import OpenAI\n",
37 | "from langchain.document_loaders import PyPDFLoader\n",
38 | "from langchain.vectorstores import Chroma\n",
39 | "from langchain.agents.agent_toolkits import (\n",
40 | " create_vectorstore_agent,\n",
41 | " VectorStoreToolkit,\n",
42 | " VectorStoreInfo,\n",
43 | ")"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "id": "18b8f141",
49 | "metadata": {},
50 | "source": [
51 | "Set the OpenAI API key for authentication"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "id": "7a0ddde1",
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_API_KEY>\""
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "id": "9cb3ca7c",
67 | "metadata": {},
68 | "source": [
69 | "Create an instance of the OpenAI language model with specified parameters"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": null,
75 | "id": "6ebf63b0",
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "llm = OpenAI(temperature=0.1, verbose=True)"
80 | ]
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "id": "e5e50726",
85 | "metadata": {},
86 | "source": [
87 | "Initialize a PDF loader for the specified file"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "id": "681ccbf2",
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "loader = PyPDFLoader(\"apple.pdf\")"
98 | ]
99 | },
100 | {
101 | "cell_type": "markdown",
102 | "id": "41e538eb",
103 | "metadata": {},
104 | "source": [
105 | "Split the PDF into individual pages for processing"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "id": "adf3f45c",
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "pages = loader.load_and_split()"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "id": "fbbc170b",
121 | "metadata": {},
122 | "source": [
123 | "Load the split pages into a Chroma vector database for efficient querying"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "id": "964eaa4a",
130 | "metadata": {},
131 | "outputs": [],
132 | "source": [
133 | "store = Chroma.from_documents(pages, collection_name=\"annualreport\")"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "id": "dbd9b7f5",
139 | "metadata": {},
140 | "source": [
141 | "Create a VectorStoreInfo object to hold metadata about the vector store"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "id": "4fd45821",
148 | "metadata": {},
149 | "outputs": [],
150 | "source": [
151 | "vectorstore_info = VectorStoreInfo(\n",
152 | " name=\"apple\",\n",
153 | " description=\"Apple quarterly consolidated financials\",\n",
154 | " vectorstore=store,\n",
155 | ")"
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "id": "9db55c3d",
161 | "metadata": {},
162 | "source": [
163 | "Convert the vector store information into a toolkit for LangChain"
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": null,
169 | "id": "40f81c28",
170 | "metadata": {},
171 | "outputs": [],
172 | "source": [
173 | "toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "id": "288a7fd0",
179 | "metadata": {},
180 | "source": [
181 | "Create an agent executor that uses the language model and toolkit for querying"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "id": "2bee8094",
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "agent_executor = create_vectorstore_agent(llm=llm, toolkit=toolkit, verbose=True)"
192 | ]
193 | },
194 | {
195 | "cell_type": "markdown",
196 | "id": "961d1da5",
197 | "metadata": {},
198 | "source": [
199 | "Prompt the user to enter a search term for querying the document"
200 | ]
201 | },
202 | {
203 | "cell_type": "code",
204 | "execution_count": null,
205 | "id": "445ea906",
206 | "metadata": {},
207 | "outputs": [],
208 | "source": [
209 | "prompt = input(\"Enter your search term: \")"
210 | ]
211 | },
212 | {
213 | "cell_type": "code",
214 | "execution_count": null,
215 | "id": "c6b36ac3",
216 | "metadata": {},
217 | "outputs": [],
218 | "source": [
219 | "if prompt:\n",
220 | "\n",
221 | " # Pass the user input to the agent executor for processing\n",
222 | "\n",
223 | " response = agent_executor.run(prompt)\n",
224 | "\n",
225 | " # Print the response from the language model to the screen\n",
226 | "\n",
227 | " print(response)"
228 | ]
229 | },
230 | {
231 | "cell_type": "markdown",
232 | "id": "8a4cac29",
233 | "metadata": {},
234 | "source": [
235 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
236 | ]
237 | }
238 | ],
239 | "metadata": {
240 | "jupytext": {
241 | "cell_metadata_filter": "-all",
242 | "main_language": "python",
243 | "notebook_metadata_filter": "-all"
244 | }
245 | },
246 | "nbformat": 4,
247 | "nbformat_minor": 5
248 | }
249 |
--------------------------------------------------------------------------------
/48_KMeans.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "c60b36d4",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "b0709d1d",
14 | "metadata": {},
15 | "source": [
16 | "This code performs k-means clustering on the Dow Jones Industrial Average (DJIA) stock data from 2020 to 2022. It extracts historical stock prices, calculates returns and volatility, and then clusters the stocks based on these metrics. The 'Elbow Method' is used to determine the optimal number of clusters. Finally, it visualizes the clusters with a scatter plot, annotating each stock with its cluster label. This is useful for identifying patterns or groupings in stock performance metrics."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "09e34b5c",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "from math import sqrt"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "d73dcf13",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "import pandas as pd\n",
37 | "from sklearn.cluster import KMeans\n",
38 | "import matplotlib.pyplot as plt\n",
39 | "plt.rc(\"font\", size=10)"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "id": "da61cf6b",
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "from openbb_terminal.sdk import openbb"
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "id": "2c671ff3",
55 | "metadata": {},
56 | "source": [
57 | "Fetch the list of Dow Jones Industrial Average (DJIA) component symbols from Wikipedia"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "id": "b0dcdce9",
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "dji = (\n",
68 | " pd.read_html('https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average')[1]\n",
69 | ")\n",
70 | "symbols = dji.Symbol.tolist()"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "id": "7140d32a",
76 | "metadata": {},
77 | "source": [
78 | "Download historical stock price data for the DJIA components using OpenBB SDK"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "id": "82f64558",
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "data = openbb.stocks.ca.hist(\n",
89 | " symbols, \n",
90 | " start_date=\"2020-01-01\",\n",
91 | " end_date=\"2022-12-31\"\n",
92 | ")"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "id": "08436f25",
98 | "metadata": {},
99 | "source": [
100 | "Calculate annualized returns and volatility for each stock in the dataset"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "id": "c3306d91",
107 | "metadata": {},
108 | "outputs": [],
109 | "source": [
110 | "moments = (\n",
111 | " data\n",
112 | " .pct_change()\n",
113 | " .describe()\n",
114 | " .T[[\"mean\", \"std\"]]\n",
115 | " .rename(columns={\"mean\": \"returns\", \"std\": \"vol\"})\n",
116 | ") * [252, sqrt(252)]"
117 | ]
118 | },
119 | {
120 | "cell_type": "markdown",
121 | "id": "6b3b12c5",
122 | "metadata": {},
123 | "source": [
124 | "Compute sum of squared errors (SSE) for k-means clustering with different cluster counts to use the Elbow Method for optimal k determination. SSE helps identify the point where adding more clusters doesn't significantly improve the model."
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "id": "da6607e8",
131 | "metadata": {},
132 | "outputs": [],
133 | "source": [
134 | "sse = []\n",
135 | "for k in range(2, 15):\n",
136 | " kmeans = KMeans(n_clusters=k, n_init=10)\n",
137 | " kmeans.fit(moments)\n",
138 | " sse.append(kmeans.inertia_)"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "id": "4853d749",
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "plt.plot(range(2, 15), sse)\n",
149 | "plt.title(\"Elbow Curve\");"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "id": "8d4b74d2",
155 | "metadata": {},
156 | "source": [
157 | "Perform k-means clustering with 5 clusters on the calculated returns and volatility metrics. Visualize the clusters in a scatter plot and annotate each stock with its cluster label for easy identification."
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "id": "cab4e3bd",
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "kmeans = KMeans(n_clusters=5, n_init=10).fit(moments)\n",
168 | "plt.scatter(\n",
169 | " moments.returns, \n",
170 | " moments.vol, \n",
171 | " c=kmeans.labels_, \n",
172 | " cmap=\"rainbow\",\n",
173 | ");"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "id": "9e0d3176",
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "plt.title(\"Dow Jones stocks by return and volatility (K=5)\")\n",
184 | "for i in range(len(moments.index)):\n",
185 | " txt = f\"{moments.index[i]} ({kmeans.labels_[i]})\"\n",
186 | " xy = tuple(moments.iloc[i, :] + [0, 0.01])\n",
187 | " plt.annotate(txt, xy)"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "id": "48ae8500",
194 | "metadata": {},
195 | "outputs": [],
196 | "source": []
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "id": "5b962026",
201 | "metadata": {},
202 | "source": [
203 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
204 | ]
205 | }
206 | ],
207 | "metadata": {
208 | "jupytext": {
209 | "cell_metadata_filter": "-all",
210 | "main_language": "python",
211 | "notebook_metadata_filter": "-all"
212 | }
213 | },
214 | "nbformat": 4,
215 | "nbformat_minor": 5
216 | }
217 |
--------------------------------------------------------------------------------
/4_VolatilityCones.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "06df379f",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "c008466a",
14 | "metadata": {},
15 | "source": [
16 | "This code downloads historical stock data for JPMorgan Chase & Co. (JPM) from Yahoo Finance, covering the year 2020. It calculates realized volatility over various rolling windows (30, 60, 90, 120 days) and analyzes the maximum, minimum, top quantile, median, and bottom quantile of these volatilities. The results are visualized using Matplotlib to show the volatility distribution over different windows. This is useful for understanding the stock's risk profile over different time frames."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "373db843",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import math\n",
27 | "import yfinance as yf\n",
28 | "import numpy as np\n",
29 | "import matplotlib.pyplot as plt"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "id": "93871d13",
35 | "metadata": {},
36 | "source": [
37 | "Download historical stock data for JPM from Yahoo Finance for the year 2020"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "f9af1afc",
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "data = yf.download(\"JPM\", start=\"2020-01-01\", end=\"2020-12-31\")"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "id": "67429a4d",
53 | "metadata": {},
54 | "source": [
55 | "Define rolling windows and quantiles to analyze"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "a4139b06",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "windows = [30, 60, 90, 120]\n",
66 | "quantiles = [0.25, 0.75]"
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "id": "2b183370",
72 | "metadata": {},
73 | "source": [
74 | "Initialize lists to store maximum, minimum, quantiles, median, and realized volatilities"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "id": "3dbec930",
81 | "metadata": {
82 | "lines_to_next_cell": 1
83 | },
84 | "outputs": [],
85 | "source": [
86 | "max_ = []\n",
87 | "min_ = []\n",
88 | "top_q = []\n",
89 | "median = []\n",
90 | "bottom_q = []\n",
91 | "realized = []"
92 | ]
93 | },
94 | {
95 | "cell_type": "markdown",
96 | "id": "3c72c004",
97 | "metadata": {},
98 | "source": [
99 | "Calculate realized volatility using rolling windows"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": null,
105 | "id": "8f7c79c2",
106 | "metadata": {
107 | "lines_to_next_cell": 1
108 | },
109 | "outputs": [],
110 | "source": [
111 | "def realized_vol(price_data, window=30):\n",
112 | " \"\"\"\n",
113 | " Calculates realized volatility over a rolling window\n",
114 | " \n",
115 | " Parameters\n",
116 | " ----------\n",
117 | " price_data : pd.DataFrame\n",
118 | " DataFrame containing stock price data\n",
119 | " window : int\n",
120 | " Rolling window size in days\n",
121 | " \n",
122 | " Returns\n",
123 | " -------\n",
124 | " realized_vol : pd.Series\n",
125 | " Series containing realized volatility values\n",
126 | " \"\"\"\n",
127 | " \n",
128 | " # Compute log returns from closing prices\n",
129 | " log_return = (price_data[\"Close\"] / price_data[\"Close\"].shift(1)).apply(np.log)\n",
130 | " \n",
131 | " # Compute rolling standard deviation of log returns and annualize it\n",
132 | " return log_return.rolling(window=window, center=False).std() * math.sqrt(252)"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "0d965c51",
138 | "metadata": {},
139 | "source": [
140 | "Loop over each window to calculate and store volatility metrics"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "id": "ede0e0e3",
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "for window in windows:\n",
151 | "\n",
152 | " # Calculate realized volatility for the current window\n",
153 | " estimator = realized_vol(window=window, price_data=data)\n",
154 | "\n",
155 | " # Append the maximum realized volatility to the list\n",
156 | " max_.append(estimator.max())\n",
157 | "\n",
158 | " # Append the top quantile realized volatility to the list\n",
159 | " top_q.append(estimator.quantile(quantiles[1]))\n",
160 | "\n",
161 | " # Append the median realized volatility to the list\n",
162 | " median.append(estimator.median())\n",
163 | "\n",
164 | " # Append the bottom quantile realized volatility to the list\n",
165 | " bottom_q.append(estimator.quantile(quantiles[0]))\n",
166 | "\n",
167 | " # Append the minimum realized volatility to the list\n",
168 | " min_.append(estimator.min())\n",
169 | "\n",
170 | "    # Append the most recent realized volatility; use .iloc for positional\n",
170 | "    # access (integer-label fallback via estimator[-1] was removed in pandas 2.x)\n",
171 | "    realized.append(estimator.iloc[-1])"
172 | ]
173 | },
174 | {
175 | "cell_type": "markdown",
176 | "id": "ab0bf7e1",
177 | "metadata": {},
178 | "source": [
179 | "Plot the realized volatility metrics for different rolling windows"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "id": "73b648d5",
186 | "metadata": {},
187 | "outputs": [],
188 | "source": [
189 | "plt.plot(windows, max_, \"-o\", linewidth=1, label=\"Max\")\n",
190 | "plt.plot(windows, top_q, \"-o\", linewidth=1, label=f\"{quantiles[1] * 100:.0f} Prctl\")\n",
191 | "plt.plot(windows, median, \"-o\", linewidth=1, label=\"Median\")\n",
192 | "plt.plot(windows, bottom_q, \"-o\", linewidth=1, label=f\"{quantiles[0] * 100:.0f} Prctl\")\n",
193 | "plt.plot(windows, min_, \"-o\", linewidth=1, label=\"Min\")"
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "id": "9aa0f64d",
199 | "metadata": {},
200 | "source": [
201 | "Plot the realized volatility calculated from the latest window"
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": null,
207 | "id": "ba1919d4",
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "plt.plot(windows, realized, \"ro-.\", linewidth=1, label=\"Realized\")\n",
212 | "plt.xticks(windows)\n",
213 | "plt.legend(loc=\"upper center\", bbox_to_anchor=(0.5, -0.1), ncol=3)"
214 | ]
215 | },
216 | {
217 | "cell_type": "markdown",
218 | "id": "6a4dfc4b",
219 | "metadata": {},
220 | "source": [
221 | "Plot the closing prices of the stock data"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": null,
227 | "id": "b959f4c8",
228 | "metadata": {},
229 | "outputs": [],
230 | "source": [
231 | "data.Close.plot()"
232 | ]
233 | },
234 | {
235 | "cell_type": "markdown",
236 | "id": "011cc534",
237 | "metadata": {},
238 | "source": [
239 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
240 | ]
241 | }
242 | ],
243 | "metadata": {
244 | "jupytext": {
245 | "cell_metadata_filter": "-all",
246 | "main_language": "python",
247 | "notebook_metadata_filter": "-all"
248 | }
249 | },
250 | "nbformat": 4,
251 | "nbformat_minor": 5
252 | }
253 |
--------------------------------------------------------------------------------
/51_IVTermStructureSkew.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "0f1af07e",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "da69000c",
14 | "metadata": {},
15 | "source": [
16 | "This code uses the OpenBB Terminal SDK to analyze option chains for the SPY ETF. It fetches option chains and expiration dates from the Yahoo Finance source, and identifies the at-the-money (ATM) strike price for SPY options. It then plots the implied volatility (IV) term structure for ATM call options and the IV skew for call options with a specific expiration date. This is useful for visualizing how implied volatility varies across different strike prices and expiration dates, aiding in options trading strategies."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "efc8d575",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "from openbb_terminal.sdk import openbb"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "f875e54b",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "import matplotlib.pyplot as plt\n",
37 | "plt.rc(\"font\", size=10)"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "id": "7d7114f0",
43 | "metadata": {},
44 | "source": [
45 | "Fetch option chains for SPY from Yahoo Finance"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "id": "d7b96161",
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "chains = openbb.stocks.options.chains(\n",
56 | " symbol=\"SPY\", \n",
57 | " source=\"YahooFinance\"\n",
58 | ")"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "id": "3d05e985",
64 | "metadata": {},
65 | "source": [
66 | "Fetch expiration dates for SPY options"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "id": "fb360b93",
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "expirations = openbb.stocks.options.expirations(\"SPY\")"
77 | ]
78 | },
79 | {
80 | "cell_type": "markdown",
81 | "id": "318a8b30",
82 | "metadata": {},
83 | "source": [
84 | "Retrieve the last adjusted closing price for SPY"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "id": "1b027826",
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "last = (\n",
95 | " openbb\n",
96 | " .stocks\n",
97 | " .load(\"SPY\")\n",
98 | " .iloc[-1][\"Adj Close\"]\n",
99 | ")"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": null,
105 | "id": "cdf8e3c2",
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "last"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "id": "d17254b4",
115 | "metadata": {},
116 | "source": [
117 | "Identify the index of the strike price closest to the last adjusted closing price"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "id": "a0063ef3",
124 | "metadata": {},
125 | "outputs": [],
126 | "source": [
127 | "idx = (\n",
128 | " (chains.strike - last)\n",
129 | " .abs()\n",
130 | " .sort_values()\n",
131 | " .index[0]\n",
132 | ")"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "6ef7cbf1",
138 | "metadata": {},
139 | "source": [
140 | "Retrieve the at-the-money (ATM) strike price"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "id": "7432cf1b",
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "atm_strike = (\n",
151 | " chains\n",
152 | " .iloc[idx]\n",
153 | " .strike\n",
154 | ")"
155 | ]
156 | },
157 | {
158 | "cell_type": "markdown",
159 | "id": "00624270",
160 | "metadata": {},
161 | "source": [
162 | "Filter the option chains to get only the ATM call options"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "id": "9fbe0cd8",
169 | "metadata": {},
170 | "outputs": [],
171 | "source": [
172 | "calls = (\n",
173 | " chains[\n",
174 | " (chains.strike == atm_strike) \n",
175 | " & (chains.optionType == \"call\")\n",
176 | " ]\n",
177 | ")"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "id": "17ad09bb",
183 | "metadata": {},
184 | "source": [
185 | "Plot the implied volatility term structure for ATM call options"
186 | ]
187 | },
188 | {
189 | "cell_type": "code",
190 | "execution_count": null,
191 | "id": "a776685a",
192 | "metadata": {},
193 | "outputs": [],
194 | "source": [
195 | "(\n",
196 | " calls\n",
197 | " .set_index(\"expiration\")\n",
198 | " .impliedVolatility.plot(title=\"IV term structure for ATM call options\")\n",
199 | ")"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "id": "742dc7cf",
205 | "metadata": {},
206 | "source": [
207 | "Filter the option chains to get call options expiring on a specific date"
208 | ]
209 | },
210 | {
211 | "cell_type": "code",
212 | "execution_count": null,
213 | "id": "ea22fc4c",
214 | "metadata": {},
215 | "outputs": [],
216 | "source": [
217 | "calls = (\n",
218 | " chains[\n",
219 | " (chains.expiration == expirations[4]) \n",
220 | " & (chains.optionType == \"call\")\n",
221 | " ]\n",
222 | ")"
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "id": "40ba8471",
228 | "metadata": {},
229 | "source": [
230 | "Plot the implied volatility skew for call options with the specified expiration date"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "id": "f2219e7a",
237 | "metadata": {},
238 | "outputs": [],
239 | "source": [
240 | "(\n",
241 | " calls\n",
242 | " .set_index(\"strike\")\n",
243 | " .impliedVolatility\n",
244 | "    .plot(title=f\"IV skew for call options expiring {expirations[4]}\")\n",
245 | ")"
246 | ]
247 | },
248 | {
249 | "cell_type": "markdown",
250 | "id": "7c4f7488",
251 | "metadata": {},
252 | "source": [
253 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
254 | ]
255 | }
256 | ],
257 | "metadata": {
258 | "jupytext": {
259 | "cell_metadata_filter": "-all",
260 | "main_language": "python",
261 | "notebook_metadata_filter": "-all"
262 | },
263 | "kernelspec": {
264 | "display_name": "Python 3 (ipykernel)",
265 | "language": "python",
266 | "name": "python3"
267 | },
268 | "language_info": {
269 | "codemirror_mode": {
270 | "name": "ipython",
271 | "version": 3
272 | },
273 | "file_extension": ".py",
274 | "mimetype": "text/x-python",
275 | "name": "python",
276 | "nbconvert_exporter": "python",
277 | "pygments_lexer": "ipython3",
278 | "version": "3.10.13"
279 | }
280 | },
281 | "nbformat": 4,
282 | "nbformat_minor": 5
283 | }
284 |
--------------------------------------------------------------------------------
/54_BlackLitterman.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "63831470",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "535538d4",
14 | "metadata": {},
15 | "source": [
16 | "This code utilizes the Black-Litterman model to incorporate subjective views into the portfolio optimization process. It fetches price data for selected assets, constructs a covariance matrix, and sets up absolute views on asset returns. The Black-Litterman model is then used to compute new expected returns, followed by the construction of an efficient frontier. This approach helps in creating a more informed and optimized portfolio by blending market equilibrium with investor views."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "30dc6f4f",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "6c1d4af5",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "from pypfopt.black_litterman import BlackLittermanModel\n",
37 | "from pypfopt.efficient_frontier import EfficientFrontier\n",
38 | "from pypfopt import risk_models, plotting"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "id": "293a45be",
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "from openbb_terminal.sdk import openbb\n",
49 | "import seaborn as sns\n",
50 | "sns.set_theme()"
51 | ]
52 | },
53 | {
54 | "cell_type": "markdown",
55 | "id": "60664575",
56 | "metadata": {},
57 | "source": [
58 | "Fetch price data for selected assets from the OpenBB terminal"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "id": "02c6c09c",
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "prices = openbb.economy.index([\"AAPL\", \"BBY\", \"BAC\", \"SBUX\", \"T\"])"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "id": "9081783b",
74 | "metadata": {},
75 | "source": [
76 | "Define absolute views on the expected returns for specific assets"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "id": "5d9cb1ca",
83 | "metadata": {},
84 | "outputs": [],
85 | "source": [
86 | "viewdict = {\n",
87 | " \"AAPL\": 0.20, \n",
88 | " \"BBY\": 0.30,\n",
89 | " \"BAC\": 0.10,\n",
90 | " \"SBUX\": 0.2,\n",
91 | " \"T\": 0.15\n",
92 | "}"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "id": "a3a6f94a",
98 | "metadata": {},
99 | "source": [
100 | "Construct the sample covariance matrix using historical price data"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "id": "2d0e2e01",
107 | "metadata": {},
108 | "outputs": [],
109 | "source": [
110 | "cov_matrix = risk_models.sample_cov(prices)"
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "id": "500bebeb",
116 | "metadata": {},
117 | "source": [
118 | "Initialize the Black-Litterman model with equal weight priors and absolute views"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "id": "6db6f3b2",
125 | "metadata": {},
126 | "outputs": [],
127 | "source": [
128 | "bl = BlackLittermanModel(\n",
129 | " cov_matrix, \n",
130 | " absolute_views=viewdict,\n",
131 | " pi=\"equal\"\n",
132 | ")"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "b3ac9e80",
138 | "metadata": {},
139 | "source": [
140 | "Compute the implied expected returns using the Black-Litterman model and initialize Efficient Frontier"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "id": "024947c8",
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "rets = bl.bl_returns()\n",
151 | "ef = EfficientFrontier(rets, cov_matrix)"
152 | ]
153 | },
154 | {
155 | "cell_type": "markdown",
156 | "id": "644d3dec",
157 | "metadata": {},
158 | "source": [
159 | "Plot the efficient frontier showing the possible portfolios"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "id": "86ea1335",
166 | "metadata": {},
167 | "outputs": [],
168 | "source": [
169 | "plotting.plot_efficient_frontier(ef, show_tickers=True)"
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "id": "aba68864",
175 | "metadata": {},
176 | "source": [
177 | "Calculate and display the optimal weights for the portfolio"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "id": "ef1c2d68",
184 | "metadata": {},
185 | "outputs": [],
186 | "source": [
187 | "bl.bl_weights()"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "id": "8bbae896",
193 | "metadata": {},
194 | "source": [
195 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
196 | ]
197 | }
198 | ],
199 | "metadata": {
200 | "jupytext": {
201 | "cell_metadata_filter": "-all",
202 | "main_language": "python",
203 | "notebook_metadata_filter": "-all"
204 | }
205 | },
206 | "nbformat": 4,
207 | "nbformat_minor": 5
208 | }
209 |
--------------------------------------------------------------------------------
/55_DownsideDeviation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "8addf00f",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "e5202c9f",
14 | "metadata": {},
15 | "source": [
16 | "This code calculates the downside deviation of stock returns for Apple Inc. (AAPL).\n",
17 | "It imports necessary libraries and loads historical adjusted closing prices.\n",
18 | "The downside deviation is computed to measure risk by focusing on negative returns.\n",
19 | "This metric is useful for investors aiming to quantify the volatility of negative returns.\n",
20 | "The downside deviation is annualized for practical financial analysis."
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "id": "eba3a353",
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "import numpy as np"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "id": "cf45199f",
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "from openbb_terminal.sdk import openbb"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "id": "fee9bac4",
46 | "metadata": {},
47 | "source": [
48 | "Load historical adjusted closing prices for Apple Inc. (AAPL)"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "id": "0ccc7c3f",
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "data = openbb.stocks.load(\"AAPL\")"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "id": "98ac41af",
64 | "metadata": {},
65 | "source": [
66 | "Calculate daily percentage change in adjusted closing prices to obtain returns"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "id": "f39cd038",
73 | "metadata": {
74 | "lines_to_next_cell": 1
75 | },
76 | "outputs": [],
77 | "source": [
78 | "returns = data[\"Adj Close\"].pct_change()"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "id": "7e8c9c93",
84 | "metadata": {},
85 | "source": [
86 | "Calculate the downside deviation of the returns"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "id": "beb978c6",
93 | "metadata": {
94 | "lines_to_next_cell": 1
95 | },
96 | "outputs": [],
97 | "source": [
98 | "def downside_deviation(returns):\n",
99 | " \"\"\"Calculate downside deviation of returns\n",
100 | " \n",
101 | " Parameters\n",
102 | " ----------\n",
103 | " returns : np.ndarray\n",
104 | " Array of daily percentage returns\n",
105 | " \n",
106 | " Returns\n",
107 | " -------\n",
108 | " downside_deviation : float\n",
109 | " Annualized downside deviation of returns\n",
110 | " \n",
111 | " Notes\n",
112 | " -----\n",
113 | " This function calculates the downside deviation, \n",
114 | " which measures the volatility of negative returns. \n",
115 | " It annualizes the deviation for practical financial analysis.\n",
116 | " \"\"\"\n",
117 | "\n",
118 | " # Initialize an empty array to store downside deviation values\n",
119 | "\n",
120 | " out = np.empty(returns.shape[1:])\n",
121 | "\n",
122 | " # Clip returns at zero to focus on negative returns\n",
123 | "\n",
124 | " downside_diff = np.clip(returns, np.NINF, 0)\n",
125 | "\n",
126 | " # Square the clipped values to calculate the squared deviations\n",
127 | "\n",
128 | " np.square(downside_diff, out=downside_diff)\n",
129 | "\n",
130 | " # Calculate the mean of squared deviations ignoring NaNs\n",
131 | "\n",
132 | " np.nanmean(downside_diff, axis=0, out=out)\n",
133 | "\n",
134 | " # Take the square root of the mean squared deviations\n",
135 | "\n",
136 | " np.sqrt(out, out=out)\n",
137 | "\n",
138 | " # Annualize the downside deviation by multiplying by the square root of 252\n",
139 | "\n",
140 | " np.multiply(out, np.sqrt(252), out=out)\n",
141 | "\n",
142 | " # Return the annualized downside deviation as a single value\n",
143 | "\n",
144 | " return out.item()"
145 | ]
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "id": "28a2ce36",
150 | "metadata": {},
151 | "source": [
152 | "Calculate and output the downside deviation of the returns"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "id": "470ebf6d",
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "downside_deviation(returns)"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "id": "f26cc8cd",
168 | "metadata": {},
169 | "source": [
170 | "Calculate the annualized standard deviation of returns for comparison"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "id": "fb6d47b4",
177 | "metadata": {},
178 | "outputs": [],
179 | "source": [
180 | "np.sqrt(np.square(returns).mean()) * np.sqrt(252)"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "id": "fa64ff0c",
186 | "metadata": {},
187 | "source": [
188 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
189 | ]
190 | }
191 | ],
192 | "metadata": {
193 | "jupytext": {
194 | "cell_metadata_filter": "-all",
195 | "main_language": "python",
196 | "notebook_metadata_filter": "-all"
197 | }
198 | },
199 | "nbformat": 4,
200 | "nbformat_minor": 5
201 | }
202 |
--------------------------------------------------------------------------------
/56_UpsideCapture.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "7b39d6ef",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "09b6a2a7",
14 | "metadata": {},
15 | "source": [
16 | "This code fetches historical stock prices for AAPL, WMT, and SPY, calculates daily returns, and constructs a portfolio. It defines functions to compute the annual return and upside capture ratio of the portfolio compared to a benchmark. The upside capture ratio measures the portfolio's performance relative to the benchmark when the benchmark is up. This is useful in practice for portfolio performance analysis and risk management."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "7589769e",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "from openbb_terminal.sdk import openbb"
27 | ]
28 | },
29 | {
30 | "cell_type": "markdown",
31 | "id": "d7af040b",
32 | "metadata": {},
33 | "source": [
34 | "Fetch historical stock prices for AAPL, WMT, and SPY starting from 2020-01-01"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "id": "01ac1e2a",
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "prices = openbb.economy.index(\n",
45 | " [\"AAPL\", \"WMT\", \"SPY\"], \n",
46 | " start_date=\"2020-01-01\"\n",
47 | ")"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "id": "6cf5e1ea",
53 | "metadata": {},
54 | "source": [
55 | "Calculate daily returns by taking the percentage change and dropping any NaN values"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "fdd82c4a",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "returns = prices.pct_change().dropna()"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "id": "859f62ce",
71 | "metadata": {},
72 | "source": [
73 | "Construct a portfolio's returns by summing the returns of AAPL and WMT, and selecting SPY and portfolio columns"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "1138bf9a",
80 | "metadata": {
81 | "lines_to_next_cell": 1
82 | },
83 | "outputs": [],
84 | "source": [
85 | "data = returns.assign(\n",
86 | " port=returns[[\"AAPL\", \"WMT\"]].sum(axis=1)\n",
87 | ")[[\"SPY\", \"port\"]]"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "id": "a8b57c5b",
93 | "metadata": {},
94 | "source": [
95 | "Compute annualized return for a series of returns"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "id": "a4341da4",
102 | "metadata": {
103 | "lines_to_next_cell": 1
104 | },
105 | "outputs": [],
106 | "source": [
107 | "def annual_return(returns):\n",
108 | " \"\"\"Compute annualized return.\n",
109 | " \n",
110 | " This function calculates the annualized return of a series of returns.\n",
111 | " \n",
112 | " Parameters\n",
113 | " ----------\n",
114 | " returns : pd.Series\n",
115 | " Series of daily returns.\n",
116 | " \n",
117 | " Returns\n",
118 | " -------\n",
119 | " float\n",
120 | " Annualized return.\n",
121 | " \"\"\"\n",
122 | " \n",
123 | " # Calculate the number of years by dividing total days by 252 (trading days in a year)\n",
124 | " num_years = len(returns) / 252\n",
125 | " \n",
126 | " # Calculate the annualized return using the geometric average\n",
127 | " return (returns + 1).prod() ** (1 / num_years) - 1"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "id": "567d5ef4",
133 | "metadata": {},
134 | "source": [
135 | "Compute upside capture ratio comparing portfolio returns to benchmark returns"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "id": "1dea9509",
142 | "metadata": {
143 | "lines_to_next_cell": 1
144 | },
145 | "outputs": [],
146 | "source": [
147 | "def upside_capture(port_returns, bench_returns):\n",
148 | " \"\"\"Compute upside capture ratio.\n",
149 | " \n",
150 | " This function calculates the upside capture ratio of portfolio returns \n",
151 | " compared to benchmark returns.\n",
152 | " \n",
153 | " Parameters\n",
154 | " ----------\n",
155 | " port_returns : pd.Series\n",
156 | " Series of portfolio returns.\n",
157 | " bench_returns : pd.Series\n",
158 | " Series of benchmark returns.\n",
159 | " \n",
160 | " Returns\n",
161 | " -------\n",
162 | " float\n",
163 | " Upside capture ratio.\n",
164 | " \"\"\"\n",
165 | " \n",
166 | " # Filter portfolio and benchmark returns where benchmark returns are positive\n",
167 | " mask = bench_returns > 0\n",
168 | " port_returns = port_returns[mask]\n",
169 | " bench_returns = bench_returns[mask]\n",
170 | " \n",
171 | " # Calculate the upside capture ratio by dividing annualized portfolio return by annualized benchmark return\n",
172 | " return (\n",
173 | " annual_return(port_returns) \n",
174 | " / annual_return(bench_returns)\n",
175 | " )"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "id": "600cd3ac",
181 | "metadata": {},
182 | "source": [
183 | "Calculate the upside capture ratio for the constructed portfolio compared to SPY"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "id": "47239c60",
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "upside_capture(data.port, data.SPY)"
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "id": "4d6184fc",
199 | "metadata": {},
200 | "source": [
201 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
202 | ]
203 | }
204 | ],
205 | "metadata": {
206 | "jupytext": {
207 | "cell_metadata_filter": "-all",
208 | "main_language": "python",
209 | "notebook_metadata_filter": "-all"
210 | }
211 | },
212 | "nbformat": 4,
213 | "nbformat_minor": 5
214 | }
215 |
--------------------------------------------------------------------------------
/58_BattingAverage.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e61b6551",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "8cebd5cb",
14 | "metadata": {},
15 | "source": [
16 | "This code retrieves stock prices, calculates returns, and evaluates portfolio performance against a benchmark. It fetches historical prices for specified stocks and computes their percentage changes to derive returns. The code then calculates portfolio returns based on equal weighting and compares them with a benchmark index. Finally, it computes the batting average to assess the portfolio's performance in up and down markets. This is useful for portfolio managers and financial analysts to evaluate investment strategies."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "f484f7f8",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import pandas as pd\n",
28 | "from openbb_terminal.sdk import openbb"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "id": "e3fe4de2",
34 | "metadata": {},
35 | "source": [
36 | "Fetch historical prices for specified stocks using the OpenBB SDK"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "id": "e6394bcf",
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "prices = openbb.economy.index([\n",
47 | " \"META\", \n",
48 | " \"AAPL\", \n",
49 | " \"AMZN\", \n",
50 | " \"NFLX\", \n",
51 | " \"GOOG\", \n",
52 | " \"QQQ\"\n",
53 | "])"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "id": "9006ddd4",
59 | "metadata": {},
60 | "source": [
61 | "Calculate percentage returns from the historical prices and remove any rows with NaN values"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": null,
67 | "id": "97f84964",
68 | "metadata": {},
69 | "outputs": [],
70 | "source": [
71 | "returns = prices.pct_change().dropna()"
72 | ]
73 | },
74 | {
75 | "cell_type": "markdown",
76 | "id": "4aa143ad",
77 | "metadata": {},
78 | "source": [
79 | "Extract the benchmark returns (QQQ) from the returns DataFrame"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "id": "e639344f",
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "bench_returns = returns.pop(\"QQQ\")"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "id": "664669a3",
95 | "metadata": {},
96 | "source": [
97 | "Calculate portfolio returns by equally weighting the individual stock returns"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": null,
103 | "id": "1f427ac8",
104 | "metadata": {
105 | "lines_to_next_cell": 1
106 | },
107 | "outputs": [],
108 | "source": [
109 | "port_returns = (returns * 0.2).sum(axis=1)"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "id": "24341092",
115 | "metadata": {},
116 | "source": [
117 | "Define the function to calculate the batting average of portfolio returns against the benchmark"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "id": "d15cb24f",
124 | "metadata": {
125 | "lines_to_next_cell": 1
126 | },
127 | "outputs": [],
128 | "source": [
129 | "def batting_average(port_returns, bench_returns):\n",
130 | " \"\"\"Calculate batting average of portfolio returns.\n",
131 | " \n",
132 | " This function computes the batting average of portfolio \n",
133 | " returns compared to a benchmark, indicating performance \n",
134 | " in up and down markets.\n",
135 | " \n",
136 | " Parameters\n",
137 | " ----------\n",
138 | " port_returns : pd.Series\n",
139 | " Returns of the portfolio.\n",
140 | " bench_returns : pd.Series\n",
141 | " Returns of the benchmark index.\n",
142 | " \n",
143 | " Returns\n",
144 | " -------\n",
145 | " pd.Series\n",
146 | " Batting average, up market, and down market metrics.\n",
147 | " \"\"\"\n",
148 | " \n",
149 | " # Initialize a dictionary to store the results\n",
150 | " results = dict(\n",
151 | " {\n",
152 | " \"batting average\": np.nan,\n",
153 | " \"up market\": np.nan,\n",
154 | " \"down market\": np.nan,\n",
155 | " }\n",
156 | " )\n",
157 | " \n",
158 | " # Calculate active returns by subtracting benchmark returns from portfolio returns\n",
159 | " active_returns = port_returns - bench_returns\n",
160 | "\n",
161 | " # Determine boolean arrays for batting average, up market, and down market conditions\n",
162 | " ba = active_returns > 0\n",
163 | " up = active_returns[bench_returns >= 0.0] > 0\n",
164 | " down = active_returns[bench_returns < 0.0] > 0\n",
165 | "\n",
166 | " # Compute the mean values for the batting average, up market, and down market\n",
167 | " if len(ba) > 0:\n",
168 | " results[\"batting average\"] = ba.mean()\n",
169 | " if len(up) > 0:\n",
170 | " results[\"up market\"] = up.mean()\n",
171 | " if len(down) > 0:\n",
172 | " results[\"down market\"] = down.mean()\n",
173 | "\n",
174 | " # Return a Series with the computed results\n",
175 | " return pd.Series(results, index=results.keys())"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "id": "046b9295",
181 | "metadata": {},
182 | "source": [
183 | "Calculate and display the batting average metrics for the portfolio"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "id": "82e94611",
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "batting_average(port_returns, bench_returns)"
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "id": "e210bcf1",
199 | "metadata": {},
200 | "source": [
201 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
202 | ]
203 | }
204 | ],
205 | "metadata": {
206 | "jupytext": {
207 | "cell_metadata_filter": "-all",
208 | "main_language": "python",
209 | "notebook_metadata_filter": "-all"
210 | }
211 | },
212 | "nbformat": 4,
213 | "nbformat_minor": 5
214 | }
215 |
--------------------------------------------------------------------------------
/64_LangChainSentimentAnalysis.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "6aa29ed3",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "dbe3c872",
14 | "metadata": {},
15 | "source": [
16 | "This code performs sentiment analysis on news articles related to Microsoft (MSFT) using a predefined language model. It imports necessary libraries, loads environment variables, and sets up a language chain model with a prompt template. The code then fetches news articles, applies the sentiment analysis model to each news description, and appends the sentiment results. Finally, it displays the news descriptions along with their corresponding sentiments."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "a7beec45",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "from openbb_terminal.sdk import openbb"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "dcdaade8",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "from langchain.chains import LLMChain\n",
37 | "from langchain.prompts import PromptTemplate\n",
38 | "from langchain.chat_models import ChatOpenAI"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "id": "86d35212",
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "from dotenv import load_dotenv"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "id": "cb878e28",
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "load_dotenv()"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "id": "c6d9e010",
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "import pandas as pd"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "id": "cee1e48e",
75 | "metadata": {},
76 | "outputs": [],
77 | "source": [
78 | "pd.set_option(\"max_colwidth\", None)"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "id": "ecdeff99",
84 | "metadata": {},
85 | "source": [
86 | "Initialize the language model with GPT-4 and set temperature for response variability"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "id": "cc0e296a",
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)"
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "id": "929eb3e1",
102 | "metadata": {},
103 | "source": [
104 | "Define the sentiment analysis prompt template"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "id": "34670050",
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "prompt = \"\"\"\n",
115 | "Is the predominant sentiment in the following statement positive, negative, or neutral?\n",
116 | "---------\n",
117 | "Statement: {statement}\n",
118 | "---------\n",
119 | "Respond with one word in lowercase: positive, negative, or neutral.\n",
120 | "Sentiment:\n",
121 | "\"\"\""
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "id": "a1067c85",
127 | "metadata": {},
128 | "source": [
129 | "Create a sentiment analysis chain using the language model and prompt template"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "id": "540facbd",
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
139 | "chain = LLMChain.from_string(\n",
140 | " llm=llm,\n",
141 | " template=prompt\n",
142 | ")"
143 | ]
144 | },
145 | {
146 | "cell_type": "markdown",
147 | "id": "905ccc67",
148 | "metadata": {},
149 | "source": [
150 | "Fetch news articles related to Microsoft (MSFT) using the OpenBB SDK"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "id": "a85fdbbe",
157 | "metadata": {},
158 | "outputs": [],
159 | "source": [
160 | "msft = openbb.news(term=\"msft\")"
161 | ]
162 | },
163 | {
164 | "cell_type": "markdown",
165 | "id": "0e72a0f5",
166 | "metadata": {},
167 | "source": [
168 | "Apply the sentiment analysis chain to each news description and add the sentiment results to a new column"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "id": "d7c08925",
175 | "metadata": {},
176 | "outputs": [],
177 | "source": [
178 | "msft[\"Sentiment\"] = msft.Description.apply(chain.run)"
179 | ]
180 | },
181 | {
182 | "cell_type": "markdown",
183 | "id": "ae31394d",
184 | "metadata": {},
185 | "source": [
186 | "Display the news descriptions along with their corresponding sentiment results"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": null,
192 | "id": "259bb33e",
193 | "metadata": {},
194 | "outputs": [],
195 | "source": [
196 | "msft[[\"Description\", \"Sentiment\"]]"
197 | ]
198 | },
199 | {
200 | "cell_type": "markdown",
201 | "id": "29c68af1",
202 | "metadata": {},
203 | "source": [
204 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
205 | ]
206 | }
207 | ],
208 | "metadata": {
209 | "jupytext": {
210 | "cell_metadata_filter": "-all",
211 | "main_language": "python",
212 | "notebook_metadata_filter": "-all"
213 | }
214 | },
215 | "nbformat": 4,
216 | "nbformat_minor": 5
217 | }
218 |
--------------------------------------------------------------------------------
/65_KMedoids.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "344122e6",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "fedd6a2c",
14 | "metadata": {},
15 | "source": [
16 | "This code performs clustering on Nasdaq-100 stock returns and volatilities to identify distinct groups using the K-Medoids algorithm. It fetches historical stock data, calculates annualized returns and volatilities, and visualizes the clustering results. The Elbow method is used to determine the optimal number of clusters. This approach is useful for financial analysis, portfolio management, and identifying patterns in stock performance."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "a3b6f094",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import pandas as pd\n",
28 | "from sklearn_extra.cluster import KMedoids\n",
29 | "import matplotlib.pyplot as plt\n",
30 | "from openbb_terminal.sdk import openbb"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "id": "67dda957",
36 | "metadata": {},
37 | "source": [
38 | "Configure default plot style and parameters for visualizations"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "id": "a254a022",
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "plt.style.use(\"default\")\n",
49 | "plt.rcParams[\"figure.figsize\"] = [5.5, 4.0]\n",
50 | "plt.rcParams[\"figure.dpi\"] = 140\n",
51 | "plt.rcParams[\"lines.linewidth\"] = 0.75\n",
52 | "plt.rcParams[\"font.size\"] = 8"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "id": "144440ca",
58 | "metadata": {},
59 | "source": [
60 | "Fetch Nasdaq-100 tickers from Wikipedia and retrieve historical stock data from the OpenBB Terminal SDK"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "id": "9bfb4664",
67 | "metadata": {},
68 | "outputs": [],
69 | "source": [
70 | "nq = pd.read_html(\"https://en.wikipedia.org/wiki/Nasdaq-100\")[4]\n",
71 | "symbols = nq.Ticker.tolist()\n",
72 | "data = openbb.stocks.ca.hist(\n",
73 | " symbols, \n",
74 | " start_date=\"2020-01-01\", \n",
75 | " end_date=\"2022-12-31\"\n",
76 | ")"
77 | ]
78 | },
79 | {
80 | "cell_type": "markdown",
81 | "id": "835e4118",
82 | "metadata": {},
83 | "source": [
84 | "Calculate annualized returns and volatilities from the historical stock data"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "id": "293200a9",
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "moments = (\n",
95 | " data\n",
96 | " .pct_change()\n",
97 | " .describe()\n",
98 | " .T[[\"mean\", \"std\"]]\n",
99 | " .rename(columns={\"mean\": \"returns\", \"std\": \"vol\"})\n",
100 | ") * [252, np.sqrt(252)]"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "id": "896c573b",
106 | "metadata": {},
107 | "source": [
108 | "Calculate the sum of squared errors (SSE) for different numbers of clusters to determine the optimal number using the Elbow method"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "id": "a4728d89",
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "sse = []\n",
119 | "for k in range(2, 15):\n",
120 | " km = KMedoids(n_clusters=k).fit(moments)\n",
121 | " sse.append(km.inertia_)"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": null,
127 | "id": "fe9f1536",
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "plt.plot(range(2, 15), sse)\n",
132 | "plt.title(\"Elbow Curve\")"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "69b91a09",
138 | "metadata": {},
139 | "source": [
140 | "Fit the K-Medoids algorithm with the optimal number of clusters (in this case, 5) and obtain cluster labels"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "id": "918cbfa6",
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "km = KMedoids(n_clusters=5).fit(moments)\n",
151 | "labels = km.labels_\n",
152 | "unique_labels = set(labels)\n",
153 | "colors = [\n",
154 | " plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))\n",
155 | "]"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "id": "b8687502",
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "labels"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "id": "1cffebb3",
171 | "metadata": {},
172 | "source": [
173 | "Visualize the clustering results by plotting annualized returns and volatilities, color-coded by cluster"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "id": "7f465b31",
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "for k, col in zip(unique_labels, colors):\n",
184 | " class_member_mask = labels == k\n",
185 | "\n",
186 | " xy = moments[class_member_mask]\n",
187 | " plt.plot(\n",
188 | " xy.iloc[:, 0],\n",
189 | " xy.iloc[:, 1],\n",
190 | " \"o\",\n",
191 | " markerfacecolor=tuple(col),\n",
192 | " markeredgecolor=\"k\",\n",
193 | " )"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "id": "97140a52",
200 | "metadata": {},
201 | "outputs": [],
202 | "source": [
203 | "plt.plot(\n",
204 | " km.cluster_centers_[:, 0],\n",
205 | " km.cluster_centers_[:, 1],\n",
206 | " \"o\",\n",
207 | " markerfacecolor=\"cyan\",\n",
208 | " markeredgecolor=\"k\",\n",
209 | ")\n",
210 | "plt.xlabel(\"Return\")\n",
211 | "plt.ylabel(\"Ann. Vol.\")"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "id": "ec75d2f7",
217 | "metadata": {},
218 | "source": [
219 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
220 | ]
221 | }
222 | ],
223 | "metadata": {
224 | "jupytext": {
225 | "cell_metadata_filter": "-all",
226 | "main_language": "python",
227 | "notebook_metadata_filter": "-all"
228 | }
229 | },
230 | "nbformat": 4,
231 | "nbformat_minor": 5
232 | }
233 |
--------------------------------------------------------------------------------
/6_RollingZScore.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "9bcf2ee3",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "a5b4c5df",
14 | "metadata": {},
15 | "source": [
16 | "This code retrieves historical stock price data for Netflix from Yahoo Finance and performs statistical analysis. It calculates the rolling z-score of the closing prices over a 30-day window, allowing for the detection of significant deviations from the mean. The z-score is then plotted and its distribution visualized using a histogram. Additionally, it computes the minimum percentage change in closing prices over a 30-day rolling window and visualizes it. This is useful for identifying extreme price movements and understanding the stock's volatility."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "50d5e330",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import yfinance as yf"
27 | ]
28 | },
29 | {
30 | "cell_type": "markdown",
31 | "id": "393360c8",
32 | "metadata": {},
33 | "source": [
34 | "Download historical stock price data for Netflix from Yahoo Finance"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "id": "b89ccf32",
41 | "metadata": {
42 | "lines_to_next_cell": 1
43 | },
44 | "outputs": [],
45 | "source": [
46 | "data = yf.download(\"NFLX\", start=\"2020-01-01\", end=\"2022-06-30\")"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "id": "87c13794",
52 | "metadata": {},
53 | "source": [
54 | "Define a function to calculate the z-score for a given chunk of data"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "id": "2b6152e4",
61 | "metadata": {
62 | "lines_to_next_cell": 1
63 | },
64 | "outputs": [],
65 | "source": [
66 | "def z_score(chunk):\n",
67 | " \"\"\"Calculate z-score for a given chunk.\n",
68 | " \n",
69 | " Parameters\n",
70 | " ----------\n",
71 | " chunk : pd.Series\n",
72 | " A series of stock prices or values.\n",
73 | " \n",
74 | " Returns\n",
75 | " -------\n",
76 | " float\n",
77 | " The z-score of the last value in the chunk.\n",
78 | " \n",
79 | " Notes\n",
80 | " -----\n",
81 | " This method computes the z-score, which is the number \n",
82 | " of standard deviations a value is from the mean.\n",
83 | " \"\"\"\n",
84 |     "    return (chunk.iloc[-1] - chunk.mean()) / chunk.std()"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "id": "d92d5b53",
90 | "metadata": {},
91 | "source": [
92 | "Calculate the rolling z-score of the closing prices over a 30-day window"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "id": "9c3b8292",
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "rolled = data.Close.rolling(window=30).apply(z_score)"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "id": "f0c0ce32",
108 | "metadata": {},
109 | "source": [
110 | "Plot the rolling z-score to visualize deviations from the mean"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "id": "5c0f3295",
117 | "metadata": {},
118 | "outputs": [],
119 | "source": [
120 | "rolled.plot()"
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "id": "5fc5a361",
126 | "metadata": {},
127 | "source": [
128 | "Plot a histogram of the rolling z-score to understand its distribution"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "id": "979cf4f9",
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "rolled.hist(bins=20)"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "id": "1acedb50",
144 | "metadata": {},
145 | "source": [
146 | "Find the minimum z-score value to identify significant deviations"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "id": "127b0f43",
153 | "metadata": {},
154 | "outputs": [],
155 | "source": [
156 | "rolled.min()"
157 | ]
158 | },
159 | {
160 | "cell_type": "markdown",
161 | "id": "f13fc8f4",
162 | "metadata": {},
163 | "source": [
164 | "Calculate the percentage change from the closing price on 20 April 2022"
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": null,
170 | "id": "502e5d91",
171 | "metadata": {},
172 | "outputs": [],
173 | "source": [
174 | "(226.19 - 348.61) / 348.61"
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "id": "da9e48db",
180 | "metadata": {},
181 | "source": [
182 | "Calculate the minimum percentage change in closing prices over a 30-day rolling window"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "id": "5c98be53",
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "min_pct_change = (\n",
193 | " data\n",
194 | " .Close\n",
195 | " .pct_change()\n",
196 | " .rolling(window=30)\n",
197 | " .min()\n",
198 | ")"
199 | ]
200 | },
201 | {
202 | "cell_type": "markdown",
203 | "id": "47be23a9",
204 | "metadata": {},
205 | "source": [
206 | "Plot the minimum percentage change to visualize extreme price movements"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": null,
212 | "id": "e1fabbf2",
213 | "metadata": {},
214 | "outputs": [],
215 | "source": [
216 | "min_pct_change.plot()"
217 | ]
218 | },
219 | {
220 | "cell_type": "markdown",
221 | "id": "3c0c5951",
222 | "metadata": {},
223 | "source": [
224 | "Plot a histogram of the minimum percentage change to understand its distribution"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": null,
230 | "id": "a1697d6b",
231 | "metadata": {},
232 | "outputs": [],
233 | "source": [
234 | "min_pct_change.hist(bins=20)"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "id": "d97d6472",
240 | "metadata": {},
241 | "source": [
242 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
243 | ]
244 | }
245 | ],
246 | "metadata": {
247 | "jupytext": {
248 | "cell_metadata_filter": "-all",
249 | "main_language": "python",
250 | "notebook_metadata_filter": "-all"
251 | },
252 | "kernelspec": {
253 | "display_name": "Python 3 (ipykernel)",
254 | "language": "python",
255 | "name": "python3"
256 | },
257 | "language_info": {
258 | "codemirror_mode": {
259 | "name": "ipython",
260 | "version": 3
261 | },
262 | "file_extension": ".py",
263 | "mimetype": "text/x-python",
264 | "name": "python",
265 | "nbconvert_exporter": "python",
266 | "pygments_lexer": "ipython3",
267 | "version": "3.10.13"
268 | }
269 | },
270 | "nbformat": 4,
271 | "nbformat_minor": 5
272 | }
273 |
--------------------------------------------------------------------------------
/73_OptionsBacktesting.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "2a7b4695",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "4dde6243",
14 | "metadata": {},
15 | "source": [
16 | "This code processes historical stock data for financial analysis and option strategy backtesting. It reads multiple CSV files containing end-of-day data, concatenates them, and saves the result in a single file. The script then loads this combined data and uses the Optopsy library to analyze option chains. It evaluates various option strategies such as short calls, long straddles, and short strangles, providing a practical tool for financial professionals to assess historical performance and strategy efficacy."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "5d5d8246",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import os\n",
27 | "import glob\n",
28 | "import pandas as pd\n",
29 | "import optopsy as op"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "id": "9337e673",
35 | "metadata": {},
36 | "source": [
37 | "Collect all CSV files in the \"rut-eod\" directory"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "25707d5a",
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "files = glob.glob(os.path.join(\"rut-eod\", \"*.csv\"))"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "id": "da83ac55",
53 | "metadata": {},
54 | "source": [
55 | "Initialize an empty list for storing dataframes"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "20abd76b",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "dfs = []\n",
66 | "for fl in files:\n",
67 | " # Read each CSV file and append its dataframe to the list\n",
68 | " df = pd.read_csv(fl)\n",
69 | " dfs.append(df)"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "id": "106998bc",
75 | "metadata": {},
76 | "source": [
77 | "Concatenate all dataframes and save the combined data to a new CSV file"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "id": "47998816",
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "pd.concat(dfs).to_csv(\"rut_historic.csv\", index=False)"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "id": "3d36b3a7",
93 | "metadata": {},
94 | "source": [
95 | "Load the combined historical data from the new CSV file"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "id": "e504c82f",
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "rut = pd.read_csv(\"rut_historic.csv\")"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "id": "0d6640b5",
111 | "metadata": {},
112 | "source": [
113 | "Print the most recent date in the dataset"
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "id": "67b0527c",
120 | "metadata": {},
121 | "outputs": [],
122 | "source": [
123 | "rut.date.max()"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "id": "4c9823ce",
129 | "metadata": {},
130 | "source": [
131 | "Load the option chains from the combined historical data using specific column indices"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "id": "8240913f",
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "rut_chains = op.csv_data(\n",
142 | " \"rut_historic.csv\",\n",
143 | " underlying_symbol=1,\n",
144 | " underlying_price=4,\n",
145 | " option_type=8,\n",
146 | " expiration=6,\n",
147 | " quote_date=0,\n",
148 | " strike=7,\n",
149 | " bid=14,\n",
150 | " ask=15\n",
151 | ")"
152 | ]
153 | },
154 | {
155 | "cell_type": "markdown",
156 | "id": "e21cd76d",
157 | "metadata": {},
158 | "source": [
159 | "Display the first few rows of the option chains dataframe"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "id": "5850a16f",
166 | "metadata": {},
167 | "outputs": [],
168 | "source": [
169 | "rut_chains.head()"
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "id": "219a8e53",
175 | "metadata": {},
176 | "source": [
177 | "Evaluate and round the results of the short calls strategy"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "id": "6f7e6cd7",
184 | "metadata": {},
185 | "outputs": [],
186 | "source": [
187 | "op.short_calls(rut_chains).round(2)"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "id": "ad327b31",
193 | "metadata": {},
194 | "source": [
195 | "Evaluate and round the results of the long straddles strategy"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": null,
201 | "id": "57f48082",
202 | "metadata": {},
203 | "outputs": [],
204 | "source": [
205 | "op.long_straddles(rut_chains).round(2)"
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "id": "1d4573d2",
211 | "metadata": {},
212 | "source": [
213 | "Evaluate and round the results of the short strangles strategy with specified parameters"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "id": "bb9d8953",
220 | "metadata": {},
221 | "outputs": [],
222 | "source": [
223 | "op.short_strangles(\n",
224 | " rut_chains, \n",
225 | " dte_interval=60, \n",
226 | " max_entry_dte=70, \n",
227 | " exit_dte=10,\n",
228 | " otm_pct_interval=0.01,\n",
229 | " max_otm_pct=0.10\n",
230 | ").round(2)"
231 | ]
232 | },
233 | {
234 | "cell_type": "markdown",
235 | "id": "5baa0691",
236 | "metadata": {},
237 | "source": [
238 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
239 | ]
240 | }
241 | ],
242 | "metadata": {
243 | "jupytext": {
244 | "cell_metadata_filter": "-all",
245 | "main_language": "python",
246 | "notebook_metadata_filter": "-all"
247 | }
248 | },
249 | "nbformat": 4,
250 | "nbformat_minor": 5
251 | }
252 |
--------------------------------------------------------------------------------
/7_Drawdown.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "0b8df441",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "f07fab33",
14 | "metadata": {},
15 | "source": [
16 | "This code downloads historical stock data for SPY from Yahoo Finance, calculates daily returns, and analyzes drawdowns. It defines functions to compute drawdowns and maximum drawdowns, which are essential for risk management. The drawdown function calculates the percentage decline from a peak in the cumulative return series. The max_drawdown function identifies the largest drawdown over the period. These metrics are visualized to evaluate the stock's risk."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "ef1f1af7",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import yfinance as yf\n",
27 | "import numpy as np"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "id": "e5a113e5",
33 | "metadata": {},
34 | "source": [
35 | "Download historical stock data for SPY from Yahoo Finance"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "id": "a63d13bb",
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "data = yf.download(\"SPY\", start=\"2020-01-01\", end=\"2022-07-31\")"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "id": "8adb54ef",
51 | "metadata": {},
52 | "source": [
53 | "Calculate daily returns from the adjusted closing prices"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "649d8b88",
60 | "metadata": {
61 | "lines_to_next_cell": 1
62 | },
63 | "outputs": [],
64 | "source": [
65 | "returns = data[\"Adj Close\"].pct_change()"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "id": "4adb0254",
71 | "metadata": {},
72 | "source": [
73 | "Define a function to determine the drawdown from daily returns"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "0d2c6022",
80 | "metadata": {
81 | "lines_to_next_cell": 1
82 | },
83 | "outputs": [],
84 | "source": [
85 | "def drawdown(returns):\n",
86 | " \"\"\"Determines the drawdown\n",
87 | " \n",
88 | " Parameters\n",
89 | " ----------\n",
90 | " returns : pd.Series\n",
91 | " Daily returns of an asset, noncumulative\n",
92 | " \n",
93 | " Returns\n",
94 | " -------\n",
95 | " drawdown : pd.Series\n",
96 | " \n",
97 | " Notes\n",
98 | " -----\n",
99 | " This function calculates the percentage decline \n",
100 | " from the peak value of cumulative returns.\n",
101 | " \"\"\"\n",
102 | "\n",
103 | " # Replace the first NaN value with 0.0 for accurate calculations\n",
104 | "\n",
105 |     "    returns = returns.fillna(0.0)\n",
106 | "\n",
107 | " # Create a series of cumulative returns over time\n",
108 | "\n",
109 | " cumulative = (returns + 1).cumprod()\n",
110 | "\n",
111 | " # Calculate the running maximum value of cumulative returns\n",
112 | "\n",
113 | " running_max = np.maximum.accumulate(cumulative)\n",
114 | "\n",
115 | " # Compute the drawdown as the percentage decline from the running maximum\n",
116 | "\n",
117 | " return (cumulative - running_max) / running_max"
118 | ]
119 | },
120 | {
121 | "cell_type": "markdown",
122 | "id": "1261aa32",
123 | "metadata": {},
124 | "source": [
125 | "Plot the drawdown over time as an area chart"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "id": "72cffa58",
132 | "metadata": {
133 | "lines_to_next_cell": 1
134 | },
135 | "outputs": [],
136 | "source": [
137 | "drawdown(returns).plot(kind=\"area\", color=\"salmon\", alpha=0.5)"
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "id": "a4aa9724",
143 | "metadata": {},
144 | "source": [
145 | "Define a function to determine the maximum drawdown from daily returns"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": null,
151 | "id": "136eda22",
152 | "metadata": {
153 | "lines_to_next_cell": 1
154 | },
155 | "outputs": [],
156 | "source": [
157 | "def max_drawdown(returns):\n",
158 | " \"\"\"Determines the maximum drawdown\n",
159 | " \n",
160 | " Parameters\n",
161 | " ----------\n",
162 | " returns : pd.Series\n",
163 | " Daily returns of an asset, noncumulative\n",
164 | " \n",
165 | " Returns\n",
166 | " -------\n",
167 | " max_drawdown : float\n",
168 | " \n",
169 | " Notes\n",
170 | " -----\n",
171 | " This function identifies the largest \n",
172 | " drawdown over the specified period.\n",
173 | " \"\"\"\n",
174 | "\n",
175 | " # Calculate the drawdown and return the minimum value as the max drawdown\n",
176 | "\n",
177 | " return np.min(drawdown(returns))"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "id": "8e5f427b",
183 | "metadata": {},
184 | "source": [
185 | "Calculate the rolling maximum drawdown over a 30-day window and plot it"
186 | ]
187 | },
188 | {
189 | "cell_type": "code",
190 | "execution_count": null,
191 | "id": "9a79e6e8",
192 | "metadata": {},
193 | "outputs": [],
194 | "source": [
195 | "returns.rolling(30).apply(max_drawdown).plot(kind=\"area\", color=\"salmon\", alpha=0.5)"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "id": "c46cc0e5",
201 | "metadata": {},
202 | "source": [
203 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest-growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
204 | ]
205 | }
206 | ],
207 | "metadata": {
208 | "jupytext": {
209 | "cell_metadata_filter": "-all",
210 | "main_language": "python",
211 | "notebook_metadata_filter": "-all"
212 | }
213 | },
214 | "nbformat": 4,
215 | "nbformat_minor": 5
216 | }
217 |
--------------------------------------------------------------------------------
/82_HierarchicalRiskParity.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e1e4398f",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "54e702d1",
14 | "metadata": {},
15 | "source": [
16 | "This code performs hierarchical risk parity (HRP) portfolio optimization using historical price data of selected assets. It fetches historical price data, calculates returns, and constructs a hierarchical tree using the Pearson correlation. The code then optimizes the portfolio using HRP methodology and visualizes the portfolio allocation and risk contributions. This is useful for constructing diversified portfolios that minimize risk through hierarchical clustering."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "81a9b353",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import pandas as pd\n",
27 | "import riskfolio as rp\n",
28 | "from openbb import obb"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "id": "987769bc",
34 | "metadata": {},
35 | "source": [
36 | "Define a list of asset symbols for which historical price data will be retrieved"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "id": "b6b98345",
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "assets = [\n",
47 | " \"XLE\", \"XLF\", \"XLU\", \"XLI\", \"GDX\", \n",
48 | " \"XLK\", \"XLV\", \"XLY\", \"XLP\", \"XLB\", \n",
49 | " \"XOP\", \"IYR\", \"XHB\", \"ITB\", \"VNQ\", \n",
50 | " \"GDXJ\", \"IYE\", \"OIH\", \"XME\", \"XRT\", \n",
51 | " \"SMH\", \"IBB\", \"KBE\", \"KRE\", \"XTL\", \n",
52 | "]"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "id": "08cacea7",
58 | "metadata": {},
59 | "source": [
60 | "Fetch historical price data for the specified assets and pivot the data into a DataFrame"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "id": "72e8db71",
67 | "metadata": {},
68 | "outputs": [],
69 | "source": [
70 | "data = (\n",
71 | " obb\n",
72 | " .equity\n",
73 | " .price\n",
74 | " .historical(assets, provider=\"yfinance\")\n",
75 | " .to_df()\n",
76 | " .pivot(columns=\"symbol\", values=\"close\")\n",
77 | ")"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "id": "d6b073b0",
83 | "metadata": {},
84 | "source": [
85 | "Calculate percentage returns from the historical price data and drop any missing values"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "id": "3564caeb",
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "returns = data.pct_change().dropna()"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "id": "f9df6bf1",
101 | "metadata": {},
102 | "source": [
103 | "Plot a dendrogram to visualize hierarchical clustering of asset returns using Pearson correlation"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": null,
109 | "id": "4a6d85ac",
110 | "metadata": {},
111 | "outputs": [],
112 | "source": [
113 | "ax = rp.plot_dendrogram(\n",
114 | " returns=returns,\n",
115 | " codependence=\"pearson\",\n",
116 | " linkage=\"single\",\n",
117 | " k=None,\n",
118 | " max_k=10,\n",
119 | " leaf_order=True,\n",
120 | " ax=None,\n",
121 | ")"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "id": "aa1da0d9",
127 | "metadata": {},
128 | "source": [
129 | "Create an instance of HCPortfolio with the calculated returns for portfolio optimization"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "id": "254f5052",
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
139 | "port = rp.HCPortfolio(returns=returns)"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "id": "59421937",
145 | "metadata": {},
146 | "source": [
147 | "Optimize the portfolio using Hierarchical Risk Parity (HRP) with specified parameters"
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": null,
153 | "id": "44556ae2",
154 | "metadata": {},
155 | "outputs": [],
156 | "source": [
157 | "w = port.optimization(\n",
158 | " model=\"HRP\",\n",
159 | " codependence=\"pearson\",\n",
160 | " rm=\"MV\",\n",
161 | " rf=0.05,\n",
162 | " linkage=\"single\",\n",
163 | " max_k=10,\n",
164 | " leaf_order=True,\n",
165 | ")"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "id": "e94bd8c4",
171 | "metadata": {},
172 | "source": [
173 | "Plot a pie chart to visualize the portfolio allocation resulting from the HRP optimization"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "id": "54d6c889",
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "ax = rp.plot_pie(\n",
184 | " w=w,\n",
185 | " title=\"HRP Naive Risk Parity\",\n",
186 | " others=0.05,\n",
187 | " nrow=25,\n",
188 | " cmap=\"tab20\",\n",
189 | " height=8,\n",
190 | " width=10,\n",
191 | " ax=None,\n",
192 | ")"
193 | ]
194 | },
195 | {
196 | "cell_type": "markdown",
197 | "id": "bc3e0460",
198 | "metadata": {},
199 | "source": [
200 | "Plot the risk contributions of each asset in the optimized portfolio"
201 | ]
202 | },
203 | {
204 | "cell_type": "code",
205 | "execution_count": null,
206 | "id": "8b566981",
207 | "metadata": {},
208 | "outputs": [],
209 | "source": [
210 | "ax = rp.plot_risk_con(\n",
211 | " w=w,\n",
212 | " cov=returns.cov(),\n",
213 | " returns=returns,\n",
214 | " rm=\"MV\",\n",
215 | " rf=0,\n",
216 | " alpha=0.05,\n",
217 | " color=\"tab:blue\",\n",
218 | " height=6,\n",
219 | " width=10,\n",
220 | " t_factor=252,\n",
221 | " ax=None,\n",
222 | ")"
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "id": "2fd1f927",
228 | "metadata": {},
229 | "source": [
230 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
231 | ]
232 | }
233 | ],
234 | "metadata": {
235 | "jupytext": {
236 | "cell_metadata_filter": "-all",
237 | "main_language": "python",
238 | "notebook_metadata_filter": "-all"
239 | }
240 | },
241 | "nbformat": 4,
242 | "nbformat_minor": 5
243 | }
244 |
--------------------------------------------------------------------------------
/90_MarkovChainsRegime.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e0b9b533",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "bbb152ce",
14 | "metadata": {},
15 | "source": [
16 | "This code uses Hidden Markov Models (HMM) to identify regimes in financial time series data. It downloads historical price data, calculates returns and ranges, and uses them as features for the HMM. The model is fitted with the features to identify different market states. The identified states are then plotted to visualize market regime changes. This is useful for understanding and predicting market behavior."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "fbd8082e",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import pandas as pd\n",
28 | "import yfinance as yf\n",
29 | "from hmmlearn import hmm"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "id": "e66793fc",
35 | "metadata": {},
36 | "source": [
37 | "Download historical price data for SPY from Yahoo Finance"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "2b0e43aa",
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "data = yf.download(\"SPY\")"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "id": "52141649",
53 | "metadata": {},
54 | "source": [
55 | "Calculate log returns of the closing prices"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "c31978ad",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "returns = np.log(data.Close / data.Close.shift(1))"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "id": "1f181add",
71 | "metadata": {},
72 | "source": [
73 | "Calculate the range as the difference between high and low prices"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "7f6a03ba",
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "range = (data.High - data.Low)"
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "id": "6a286ad5",
89 | "metadata": {},
90 | "source": [
91 | "Concatenate returns and range into a single DataFrame and drop any missing values"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "id": "0b6e0fed",
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "features = pd.concat([returns, range], axis=1).dropna()\n",
102 | "features.columns = [\"returns\", \"range\"]"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "id": "b7e18b8d",
108 | "metadata": {},
109 | "source": [
110 | "Initialize a Gaussian Hidden Markov Model with 3 states and fit it to the features"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "id": "51c204c1",
117 | "metadata": {},
118 | "outputs": [],
119 | "source": [
120 | "model = hmm.GaussianHMM(\n",
121 | " n_components=3,\n",
122 | " covariance_type=\"full\",\n",
123 | " n_iter=1000,\n",
124 | ")\n",
125 | "model.fit(features)"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "id": "8b338395",
131 | "metadata": {},
132 | "source": [
133 | "Predict the hidden states for the given features and store them in a Series"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "id": "b02db04c",
140 | "metadata": {},
141 | "outputs": [],
142 | "source": [
143 | "states = pd.Series(model.predict(features), index=data.index[1:])\n",
144 | "states.name = \"state\""
145 | ]
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "id": "649d1abc",
150 | "metadata": {},
151 | "source": [
152 | "Plot a histogram of the hidden states"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "id": "c1da4348",
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "states.hist()"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "id": "99d0edca",
168 | "metadata": {},
169 | "source": [
170 | "Define a color map for the different states"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "id": "7944911f",
177 | "metadata": {},
178 | "outputs": [],
179 | "source": [
180 | "color_map = {\n",
181 | " 0.0: \"green\",\n",
182 | " 1.0: \"orange\",\n",
183 | " 2.0: \"red\"\n",
184 | "}"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "id": "43dda8ae",
190 | "metadata": {},
191 | "source": [
192 | "Concatenate the closing prices and the states, drop missing values, \n",
193 | "set state as a hierarchical index, unstack the state index, and plot the closing prices with different colors for each state"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "id": "b77b347f",
200 | "metadata": {},
201 | "outputs": [],
202 | "source": [
203 | "(\n",
204 | " pd.concat([data.Close, states], axis=1)\n",
205 | " .dropna()\n",
206 | " .set_index(\"state\", append=True)\n",
207 | " .Close\n",
208 | " .unstack(\"state\")\n",
209 | " .plot(color=color_map)\n",
210 | ")"
211 | ]
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "id": "5a9b87dd",
216 | "metadata": {},
217 | "source": [
218 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
219 | ]
220 | }
221 | ],
222 | "metadata": {
223 | "jupytext": {
224 | "cell_metadata_filter": "-all",
225 | "main_language": "python",
226 | "notebook_metadata_filter": "-all"
227 | }
228 | },
229 | "nbformat": 4,
230 | "nbformat_minor": 5
231 | }
232 |
--------------------------------------------------------------------------------
/94_MPLFinanceForStockCharts.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "464b6cd8",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "e3bcc3f9",
14 | "metadata": {},
15 | "source": [
16 | "This code downloads historical stock data for specified tickers and time periods using yfinance. \n",
17 | "It then visualizes the data using mplfinance, producing various types of financial charts \n",
18 | "such as candlestick, line, and Renko charts. The code also demonstrates how to plot moving \n",
19 | "averages and how to include volume in the charts. This is useful for technical analysis \n",
20 | "and understanding price movements over different timeframes."
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "id": "b6f07ce9",
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "import yfinance as yf\n",
31 | "import mplfinance as mpf\n",
32 | "import warnings"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "id": "1343f752",
39 | "metadata": {},
40 | "outputs": [],
41 | "source": [
42 | "warnings.filterwarnings('ignore')"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "id": "8a8960a3",
48 | "metadata": {},
49 | "source": [
50 | "Download historical stock data for Apple (AAPL) from yfinance for the specified date range"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "id": "14bc43b3",
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "data = yf.download(\"AAPL\", start=\"2022-01-01\", end=\"2022-06-30\")"
61 | ]
62 | },
63 | {
64 | "cell_type": "markdown",
65 | "id": "7ded8f61",
66 | "metadata": {},
67 | "source": [
68 | "Plot the downloaded data using a default chart type"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "id": "17fa0ce1",
75 | "metadata": {},
76 | "outputs": [],
77 | "source": [
78 | "mpf.plot(data)"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "id": "1c0611a0",
84 | "metadata": {},
85 | "source": [
86 | "Plot the data using a candlestick chart"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "id": "4e278a86",
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "mpf.plot(data, type=\"candle\")"
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "id": "e285c032",
102 | "metadata": {},
103 | "source": [
104 | "Plot the data using a line chart"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "id": "9214073c",
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "mpf.plot(data, type=\"line\")"
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "id": "61bbcc00",
120 | "metadata": {},
121 | "source": [
122 | "Plot the data using a Renko chart"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "id": "0f63a046",
129 | "metadata": {},
130 | "outputs": [],
131 | "source": [
132 | "mpf.plot(data, type=\"renko\")"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "5865feca",
138 | "metadata": {},
139 | "source": [
140 | "Plot the data using an OHLC chart with a 15-day moving average"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "id": "7a36a88c",
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "mpf.plot(data, type=\"ohlc\", mav=15)"
151 | ]
152 | },
153 | {
154 | "cell_type": "markdown",
155 | "id": "e8b33855",
156 | "metadata": {},
157 | "source": [
158 | "Plot the data using a candlestick chart with moving averages of 7, 14, and 21 days"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "id": "3bf408dd",
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "mpf.plot(data, type=\"candle\", mav=(7, 14, 21))"
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "id": "622d2692",
174 | "metadata": {},
175 | "source": [
176 | "Plot the candlestick chart with moving averages and volume"
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "id": "82641833",
183 | "metadata": {},
184 | "outputs": [],
185 | "source": [
186 | "mpf.plot(data, type=\"candle\", mav=(7, 14, 21), volume=True)"
187 | ]
188 | },
189 | {
190 | "cell_type": "markdown",
191 | "id": "f62ec2a9",
192 | "metadata": {},
193 | "source": [
194 | "Plot the candlestick chart with moving averages, volume, and show non-trading periods"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": null,
200 | "id": "ad60039d",
201 | "metadata": {},
202 | "outputs": [],
203 | "source": [
204 | "mpf.plot(\n",
205 | " data, \n",
206 | " type=\"candle\", \n",
207 | " mav=(7, 14, 21), \n",
208 | " volume=True, \n",
209 | " show_nontrading=True\n",
210 | ")"
211 | ]
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "id": "72374bfa",
216 | "metadata": {},
217 | "source": [
218 | "Download intraday stock data for Palantir (PLTR) with 1-minute intervals over the last 5 days"
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "execution_count": null,
224 | "id": "600c9dbc",
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "intraday = yf.download(tickers=\"PLTR\", period=\"5d\", interval=\"1m\")"
229 | ]
230 | },
231 | {
232 | "cell_type": "markdown",
233 | "id": "fc78ad0c",
234 | "metadata": {},
235 | "source": [
236 | "Select the last 100 rows of intraday data for plotting"
237 | ]
238 | },
239 | {
240 | "cell_type": "code",
241 | "execution_count": null,
242 | "id": "27e18b45",
243 | "metadata": {},
244 | "outputs": [],
245 | "source": [
246 | "iday = intraday.iloc[-100:, :]"
247 | ]
248 | },
249 | {
250 | "cell_type": "markdown",
251 | "id": "017866b3",
252 | "metadata": {},
253 | "source": [
254 | "Plot the selected intraday data using a candlestick chart with 7 and 12-period moving averages and volume"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": null,
260 | "id": "502d5882",
261 | "metadata": {},
262 | "outputs": [],
263 | "source": [
264 | "mpf.plot(iday, type=\"candle\", mav=(7, 12), volume=True)"
265 | ]
266 | },
267 | {
268 | "cell_type": "markdown",
269 | "id": "76dc895f",
270 | "metadata": {},
271 | "source": [
272 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
273 | ]
274 | }
275 | ],
276 | "metadata": {
277 | "jupytext": {
278 | "cell_metadata_filter": "-all",
279 | "main_language": "python",
280 | "notebook_metadata_filter": "-all"
281 | }
282 | },
283 | "nbformat": 4,
284 | "nbformat_minor": 5
285 | }
286 |
--------------------------------------------------------------------------------
/98_HestonModel.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "b2530bdd",
6 | "metadata": {},
7 | "source": [
8 | ""
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "c70588cd",
14 | "metadata": {},
15 | "source": [
16 | "This code prices an American call option using the Heston model, a popular stochastic volatility model in quantitative finance. It sets up the necessary market data, including spot price, dividend yield, risk-free rate, and volatility. The code configures the Heston process with parameters such as initial variance, mean reversion rate, long-term variance, volatility of volatility, and correlation. It then constructs the option payoff and exercise type, sets up the finite difference pricing engine, and calculates the option price. This approach is useful for pricing options that cannot be easily handled by closed-form solutions."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "2b46a1fd",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import QuantLib as ql"
27 | ]
28 | },
29 | {
30 | "cell_type": "markdown",
31 | "id": "44750a62",
32 | "metadata": {},
33 | "source": [
34 | "Set the evaluation date for the option pricing"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "id": "5af4b1a7",
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "evaluation_date = ql.Date(30, 5, 2024)\n",
45 | "ql.Settings.instance().evaluationDate = evaluation_date"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "id": "30b6b6a4",
51 | "metadata": {},
52 | "source": [
53 | "Define the expiry date, strike price, and option type for the American option"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "bb0c8ad2",
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "expiry_date = ql.Date(20, 9, 2024)\n",
64 | "strike_price = 190\n",
65 | "option_type = ql.Option.Call"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "id": "d791b5ae",
71 | "metadata": {},
72 | "source": [
73 | "Set the spot price, dividend rate, risk-free rate, and volatility of the underlying asset"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "94c43338",
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "spot_price = 191.62\n",
84 | "dividend_rate = 0.0053\n",
85 | "risk_free_rate = 0.05\n",
86 | "volatility = 0.2361"
87 | ]
88 | },
89 | {
90 | "cell_type": "markdown",
91 | "id": "00066811",
92 | "metadata": {},
93 | "source": [
94 | "Define the term structures for dividends and risk-free rates as flat curves"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "id": "cf1c5332",
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "dividend_ts = ql.YieldTermStructureHandle(\n",
105 | " ql.FlatForward(\n",
106 | " evaluation_date, \n",
107 | " dividend_rate, \n",
108 | " ql.Actual360()\n",
109 | " )\n",
110 | ")"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "id": "2307a2d3",
117 | "metadata": {},
118 | "outputs": [],
119 | "source": [
120 | "risk_free_ts = ql.YieldTermStructureHandle(\n",
121 | " ql.FlatForward(\n",
122 | " evaluation_date, \n",
123 | " risk_free_rate, \n",
124 | " ql.Actual360()\n",
125 | " )\n",
126 | ")"
127 | ]
128 | },
129 | {
130 | "cell_type": "markdown",
131 | "id": "45034dc2",
132 | "metadata": {},
133 | "source": [
134 | "Set up the Heston process parameters including initial variance, mean reversion, long-term variance, and correlation"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": null,
140 | "id": "79b5559d",
141 | "metadata": {},
142 | "outputs": [],
143 | "source": [
144 | "v0 = volatility * volatility\n",
145 | "kappa = 2.0\n",
146 | "theta = volatility * volatility\n",
147 | "sigma = 0.1\n",
148 | "rho = 0.0"
149 | ]
150 | },
151 | {
152 | "cell_type": "markdown",
153 | "id": "b669e4f5",
154 | "metadata": {},
155 | "source": [
156 | "Initialize the Heston process using the defined term structures, spot price, and Heston parameters"
157 | ]
158 | },
159 | {
160 | "cell_type": "code",
161 | "execution_count": null,
162 | "id": "9f20cb3b",
163 | "metadata": {},
164 | "outputs": [],
165 | "source": [
166 | "heston_process = ql.HestonProcess(\n",
167 | " risk_free_ts, \n",
168 | " dividend_ts, \n",
169 | " ql.QuoteHandle(\n",
170 | " ql.SimpleQuote(spot_price)\n",
171 | " ), \n",
172 | " v0, \n",
173 | " kappa, \n",
174 | " theta, \n",
175 | " sigma, \n",
176 | " rho\n",
177 | ")"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "id": "1d6271b0",
183 | "metadata": {},
184 | "source": [
185 | "Create a Heston model instance using the Heston process"
186 | ]
187 | },
188 | {
189 | "cell_type": "code",
190 | "execution_count": null,
191 | "id": "f590bfdf",
192 | "metadata": {},
193 | "outputs": [],
194 | "source": [
195 | "heston_model = ql.HestonModel(heston_process)"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "id": "c91b69f6",
201 | "metadata": {},
202 | "source": [
203 | "Define the payoff and exercise type for the American option"
204 | ]
205 | },
206 | {
207 | "cell_type": "code",
208 | "execution_count": null,
209 | "id": "6895b95f",
210 | "metadata": {},
211 | "outputs": [],
212 | "source": [
213 | "payoff = ql.PlainVanillaPayoff(option_type, strike_price)\n",
214 | "exercise = ql.AmericanExercise(evaluation_date, expiry_date)\n",
215 | "american_option = ql.VanillaOption(payoff, exercise)"
216 | ]
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "id": "2a1be49b",
221 | "metadata": {},
222 | "source": [
223 | "Set up the finite difference engine for pricing the American option using the Heston model"
224 | ]
225 | },
226 | {
227 | "cell_type": "code",
228 | "execution_count": null,
229 | "id": "f3683e2c",
230 | "metadata": {},
231 | "outputs": [],
232 | "source": [
233 | "heston_fd_engine = ql.FdHestonVanillaEngine(heston_model)\n",
234 | "american_option.setPricingEngine(heston_fd_engine)"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "id": "3c268400",
240 | "metadata": {},
241 | "source": [
242 | "Calculate and print the net present value (NPV) or option price"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": null,
248 | "id": "2b01cd50",
249 | "metadata": {},
250 | "outputs": [],
251 | "source": [
252 | "option_price = american_option.NPV()\n",
253 | "print(f\"Option Price: {option_price:.2f}\")"
254 | ]
255 | },
256 | {
257 | "cell_type": "markdown",
258 | "id": "53c45f10",
259 | "metadata": {},
260 | "source": [
261 | "PyQuant News is where finance practitioners level up with Python for quant finance, algorithmic trading, and market data analysis. Looking to get started? Check out the fastest growing, top-selling course to get started with Python for quant finance. For educational purposes. Not investment advice. Use at your own risk."
262 | ]
263 | }
264 | ],
265 | "metadata": {
266 | "jupytext": {
267 | "cell_metadata_filter": "-all",
268 | "main_language": "python",
269 | "notebook_metadata_filter": "-all"
270 | }
271 | },
272 | "nbformat": 4,
273 | "nbformat_minor": 5
274 | }
275 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ### The PyQuant Newsletter
2 |
3 | Code notebooks from the PyQuant Newsletter
--------------------------------------------------------------------------------
/nvda.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyquantnews/PyQuantNewsletter/41bc8c11a78206fe53a36a9f1c6403c79d098766/nvda.pdf
--------------------------------------------------------------------------------
/pqn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyquantnews/PyQuantNewsletter/41bc8c11a78206fe53a36a9f1c6403c79d098766/pqn.png
--------------------------------------------------------------------------------