├── .github
│   └── workflows
│       ├── deploy.yaml
│       └── test.yaml
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── nbs
│   ├── 00_core.ipynb
│   ├── _quarto.yml
│   ├── index.ipynb
│   ├── nbdev.yml
│   ├── sidebar.yml
│   └── styles.css
├── settings.ini
├── setup.py
└── ts2ml
    ├── __init__.py
    ├── _modidx.py
    └── core.py
/.github/workflows/deploy.yaml:
--------------------------------------------------------------------------------
1 | name: Deploy to GitHub Pages
2 |
3 | permissions:
4 | contents: write
5 | pages: write
6 |
7 | on:
8 | push:
9 | branches: [ "main", "master" ]
10 | workflow_dispatch:
11 | jobs:
12 | deploy:
13 | runs-on: ubuntu-latest
14 | steps: [uses: fastai/workflows/quarto-ghp@master]
15 |
--------------------------------------------------------------------------------
/.github/workflows/test.yaml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on: [workflow_dispatch, pull_request, push]
3 |
4 | jobs:
5 | test:
6 | runs-on: ubuntu-latest
7 | steps: [uses: fastai/workflows/nbdev-ci@master]
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | _docs/
2 | _proc/
3 |
4 | *.bak
5 | .gitattributes
6 | .last_checked
7 | .gitconfig
8 | *.bak
9 | *.log
10 | *~
11 | ~*
12 | _tmp*
13 | tmp*
14 | tags
15 | *.pkg
16 |
17 | # Byte-compiled / optimized / DLL files
18 | __pycache__/
19 | *.py[cod]
20 | *$py.class
21 |
22 | # C extensions
23 | *.so
24 |
25 | # Distribution / packaging
26 | .Python
27 | env/
28 | build/
29 | develop-eggs/
30 | dist/
31 | downloads/
32 | eggs/
33 | .eggs/
34 | lib/
35 | lib64/
36 | parts/
37 | sdist/
38 | var/
39 | wheels/
40 | *.egg-info/
41 | .installed.cfg
42 | *.egg
43 |
44 | # PyInstaller
45 | # Usually these files are written by a python script from a template
46 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
47 | *.manifest
48 | *.spec
49 |
50 | # Installer logs
51 | pip-log.txt
52 | pip-delete-this-directory.txt
53 |
54 | # Unit test / coverage reports
55 | htmlcov/
56 | .tox/
57 | .coverage
58 | .coverage.*
59 | .cache
60 | nosetests.xml
61 | coverage.xml
62 | *.cover
63 | .hypothesis/
64 |
65 | # Translations
66 | *.mo
67 | *.pot
68 |
69 | # Django stuff:
70 | *.log
71 | local_settings.py
72 |
73 | # Flask stuff:
74 | instance/
75 | .webassets-cache
76 |
77 | # Scrapy stuff:
78 | .scrapy
79 |
80 | # Sphinx documentation
81 | docs/_build/
82 |
83 | # PyBuilder
84 | target/
85 |
86 | # Jupyter Notebook
87 | .ipynb_checkpoints
88 |
89 | # pyenv
90 | .python-version
91 |
92 | # celery beat schedule file
93 | celerybeat-schedule
94 |
95 | # SageMath parsed files
96 | *.sage.py
97 |
98 | # dotenv
99 | .env
100 |
101 | # virtualenv
102 | .venv
103 | venv/
104 | ENV/
105 |
106 | # Spyder project settings
107 | .spyderproject
108 | .spyproject
109 |
110 | # Rope project settings
111 | .ropeproject
112 |
113 | # mkdocs documentation
114 | /site
115 |
116 | # mypy
117 | .mypy_cache/
118 |
119 | .vscode
120 | *.swp
121 |
122 | # osx generated files
123 | .DS_Store
124 | .DS_Store?
125 | .Trashes
126 | ehthumbs.db
127 | Thumbs.db
128 | .idea
129 |
130 | # pytest
131 | .pytest_cache
132 |
133 | # tools/trust-doc-nbs
134 | docs_src/.last_checked
135 |
136 | # symlinks to fastai
137 | docs_src/fastai
138 | tools/fastai
139 |
140 | # link checker
141 | checklink/cookies.txt
142 |
143 | # .gitconfig is now autogenerated
144 | .gitconfig
145 |
146 | # Quarto installer
147 | .deb
148 | .pkg
149 |
150 | # Quarto
151 | .quarto
152 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2022, fastai
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include settings.ini
2 | include LICENSE
3 | include CONTRIBUTING.md
4 | include README.md
5 | recursive-exclude * __pycache__
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ts2ml
2 |
3 |
4 |
5 | ## Install
6 |
7 | ``` sh
8 | pip install ts2ml
9 | ```
10 |
11 | ## How to use
12 |
13 | ``` python
14 | import pandas as pd
15 | from ts2ml.core import add_missing_slots
16 | from ts2ml.core import transform_ts_data_into_features_and_target
17 | ```
18 |
19 | ``` python
20 | df = pd.DataFrame({
21 | 'pickup_hour': ['2022-01-01 00:00:00', '2022-01-01 01:00:00', '2022-01-01 03:00:00', '2022-01-01 01:00:00', '2022-01-01 02:00:00', '2022-01-01 05:00:00'],
22 | 'pickup_location_id': [1, 1, 1, 2, 2, 2],
23 | 'rides': [2, 3, 1, 1, 2, 1]
24 | })
25 | df
26 | ```
27 |
28 |
29 |
40 |
41 | | | pickup_hour | pickup_location_id | rides |
42 | |-----|---------------------|--------------------|-------|
43 | | 0 | 2022-01-01 00:00:00 | 1 | 2 |
44 | | 1 | 2022-01-01 01:00:00 | 1 | 3 |
45 | | 2 | 2022-01-01 03:00:00 | 1 | 1 |
46 | | 3 | 2022-01-01 01:00:00 | 2 | 1 |
47 | | 4 | 2022-01-01 02:00:00 | 2 | 2 |
48 | | 5 | 2022-01-01 05:00:00 | 2 | 1 |
49 |
50 |
51 |
52 | Let’s fill the missing slots with zeros
53 |
54 | ``` python
55 | df = add_missing_slots(df, datetime_col='pickup_hour', entity_col='pickup_location_id', value_col='rides', freq='H')
56 | df
57 | ```
58 |
59 | 100%|██████████| 2/2 [00:00<00:00, 907.86it/s]
60 |
61 |
62 |
73 |
74 | | | pickup_hour | pickup_location_id | rides |
75 | |-----|---------------------|--------------------|-------|
76 | | 0 | 2022-01-01 00:00:00 | 1 | 2 |
77 | | 1 | 2022-01-01 01:00:00 | 1 | 3 |
78 | | 2 | 2022-01-01 02:00:00 | 1 | 0 |
79 | | 3 | 2022-01-01 03:00:00 | 1 | 1 |
80 | | 4 | 2022-01-01 04:00:00 | 1 | 0 |
81 | | 5 | 2022-01-01 05:00:00 | 1 | 0 |
82 | | 6 | 2022-01-01 00:00:00 | 2 | 0 |
83 | | 7 | 2022-01-01 01:00:00 | 2 | 1 |
84 | | 8 | 2022-01-01 02:00:00 | 2 | 2 |
85 | | 9 | 2022-01-01 03:00:00 | 2 | 0 |
86 | | 10 | 2022-01-01 04:00:00 | 2 | 0 |
87 | | 11 | 2022-01-01 05:00:00 | 2 | 1 |
88 |
89 |
90 |
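Each location now spans every hourly slot between the global minimum and
maximum timestamps. A quick sanity check (a sketch, not part of the
library):

``` python
# 2 locations × 6 hourly slots = 12 rows
assert len(df) == df['pickup_location_id'].nunique() * df['pickup_hour'].nunique()
```
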
91 | Now let's build features and targets to predict the number of rides for
92 | the next hour at each location, using the number of rides from the
93 | previous 3 hours:
94 |
95 | ``` python
96 | features, targets = transform_ts_data_into_features_and_target(
97 | df,
98 | n_features=3,
99 | datetime_col='pickup_hour',
100 | entity_col='pickup_location_id',
101 | value_col='rides',
102 | n_targets=1,
103 | step_size=1,
104 | step_name='hour'
105 | )
106 | ```
107 |
108 | 100%|██████████| 2/2 [00:00<00:00, 597.86it/s]
109 |
110 | ``` python
111 | features
112 | ```
113 |
114 |
115 |
126 |
127 | | | rides_previous_3_hour | rides_previous_2_hour | rides_previous_1_hour | pickup_hour | pickup_location_id |
128 | |-----|-----------------------|-----------------------|-----------------------|---------------------|--------------------|
129 | | 0 | 2.0 | 3.0 | 0.0 | 2022-01-01 03:00:00 | 1 |
130 | | 1 | 3.0 | 0.0 | 1.0 | 2022-01-01 04:00:00 | 1 |
131 | | 2 | 0.0 | 1.0 | 2.0 | 2022-01-01 03:00:00 | 2 |
132 | | 3 | 1.0 | 2.0 | 0.0 | 2022-01-01 04:00:00 | 2 |
133 |
134 |
135 |
136 | ``` python
137 | targets
138 | ```
139 |
140 |
141 |
152 |
153 | | | target_rides_next_hour |
154 | |-----|------------------------|
155 | | 0 | 1.0 |
156 | | 1 | 0.0 |
157 | | 2 | 0.0 |
158 | | 3 | 0.0 |
159 |
160 |
161 |
162 | ``` python
163 | Xy_df = pd.concat([features, targets], axis=1)
164 | Xy_df
165 | ```
166 |
167 |
168 |
179 |
180 | | | rides_previous_3_hour | rides_previous_2_hour | rides_previous_1_hour | pickup_hour | pickup_location_id | target_rides_next_hour |
181 | |-----|-----------------------|-----------------------|-----------------------|---------------------|--------------------|------------------------|
182 | | 0 | 2.0 | 3.0 | 0.0 | 2022-01-01 03:00:00 | 1 | 1.0 |
183 | | 1 | 3.0 | 0.0 | 1.0 | 2022-01-01 04:00:00 | 1 | 0.0 |
184 | | 2 | 0.0 | 1.0 | 2.0 | 2022-01-01 03:00:00 | 2 | 0.0 |
185 | | 3 | 1.0 | 2.0 | 0.0 | 2022-01-01 04:00:00 | 2 | 0.0 |
186 |
187 |
188 |
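The combined frame is ready for any supervised learner. As a minimal
sketch (the model choice is illustrative, not part of ts2ml), drop the
datetime and entity columns and fit a scikit-learn regressor:

``` python
from sklearn.linear_model import LinearRegression

# illustrative only: drop (or encode) the non-numeric columns before fitting
X = Xy_df.drop(columns=['pickup_hour', 'pickup_location_id', 'target_rides_next_hour'])
y = Xy_df['target_rides_next_hour']

model = LinearRegression().fit(X, y)
model.predict(X)
```
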
189 | # Another Example
190 |
191 | Monthly spaced time series
192 |
193 | ``` python
194 | import pandas as pd
195 | import numpy as np
196 |
197 | # Generate timestamp index with monthly frequency
198 | date_rng = pd.date_range(start='1/1/2020', end='12/1/2022', freq='MS')
199 |
200 | # Create list of city codes
201 | cities = ['FOR', 'SP', 'RJ']
202 |
203 | # Create dataframe with random sales data for each city on each month
204 | df = pd.DataFrame({
205 | 'date': date_rng,
206 | 'city': np.repeat(cities, len(date_rng)//len(cities)),
207 | 'sales': np.random.randint(1000, 5000, size=len(date_rng))
208 | })
209 | df
210 | ```
211 |
212 |
213 |
224 |
225 | | | date | city | sales |
226 | |-----|------------|------|-------|
227 | | 0 | 2020-01-01 | FOR | 4944 |
228 | | 1 | 2020-02-01 | FOR | 3435 |
229 | | 2 | 2020-03-01 | FOR | 4543 |
230 | | 3 | 2020-04-01 | FOR | 3879 |
231 | | 4 | 2020-05-01 | FOR | 2601 |
232 | | 5 | 2020-06-01 | FOR | 2922 |
233 | | 6 | 2020-07-01 | FOR | 4542 |
234 | | 7 | 2020-08-01 | FOR | 1338 |
235 | | 8 | 2020-09-01 | FOR | 2938 |
236 | | 9 | 2020-10-01 | FOR | 2695 |
237 | | 10 | 2020-11-01 | FOR | 4065 |
238 | | 11 | 2020-12-01 | FOR | 3864 |
239 | | 12 | 2021-01-01 | SP | 2652 |
240 | | 13 | 2021-02-01 | SP | 2137 |
241 | | 14 | 2021-03-01 | SP | 2663 |
242 | | 15 | 2021-04-01 | SP | 1168 |
243 | | 16 | 2021-05-01 | SP | 4523 |
244 | | 17 | 2021-06-01 | SP | 4135 |
245 | | 18 | 2021-07-01 | SP | 3566 |
246 | | 19 | 2021-08-01 | SP | 2121 |
247 | | 20 | 2021-09-01 | SP | 1070 |
248 | | 21 | 2021-10-01 | SP | 1624 |
249 | | 22 | 2021-11-01 | SP | 3034 |
250 | | 23 | 2021-12-01 | SP | 4063 |
251 | | 24 | 2022-01-01 | RJ | 2297 |
252 | | 25 | 2022-02-01 | RJ | 3430 |
253 | | 26 | 2022-03-01 | RJ | 2903 |
254 | | 27 | 2022-04-01 | RJ | 4197 |
255 | | 28 | 2022-05-01 | RJ | 4141 |
256 | | 29 | 2022-06-01 | RJ | 2899 |
257 | | 30 | 2022-07-01 | RJ | 4529 |
258 | | 31 | 2022-08-01 | RJ | 3612 |
259 | | 32 | 2022-09-01 | RJ | 1856 |
260 | | 33 | 2022-10-01 | RJ | 4804 |
261 | | 34 | 2022-11-01 | RJ | 1764 |
262 | | 35 | 2022-12-01 | RJ | 4425 |
263 |
264 |
265 |
266 | FOR only has data for 2020, SP only for 2021, and RJ only for 2022.
267 | Let's also simulate additional missing slots scattered within each year.
268 |
269 | ``` python
270 | # Generate random indices to drop
271 | drop_indices = np.random.choice(df.index, size=int(len(df)*0.2), replace=False)
272 |
273 | # Drop selected rows from dataframe
274 | df = df.drop(drop_indices)
275 | df.reset_index(drop=True, inplace=True)
276 | df
277 | ```
278 |
279 |
280 |
291 |
292 | | | date | city | sales |
293 | |-----|------------|------|-------|
294 | | 0 | 2020-01-01 | FOR | 4944 |
295 | | 1 | 2020-02-01 | FOR | 3435 |
296 | | 2 | 2020-03-01 | FOR | 4543 |
297 | | 3 | 2020-04-01 | FOR | 3879 |
298 | | 4 | 2020-05-01 | FOR | 2601 |
299 | | 5 | 2020-06-01 | FOR | 2922 |
300 | | 6 | 2020-07-01 | FOR | 4542 |
301 | | 7 | 2020-08-01 | FOR | 1338 |
302 | | 8 | 2020-09-01 | FOR | 2938 |
303 | | 9 | 2020-11-01 | FOR | 4065 |
304 | | 10 | 2020-12-01 | FOR | 3864 |
305 | | 11 | 2021-01-01 | SP | 2652 |
306 | | 12 | 2021-02-01 | SP | 2137 |
307 | | 13 | 2021-03-01 | SP | 2663 |
308 | | 14 | 2021-07-01 | SP | 3566 |
309 | | 15 | 2021-08-01 | SP | 2121 |
310 | | 16 | 2021-10-01 | SP | 1624 |
311 | | 17 | 2021-11-01 | SP | 3034 |
312 | | 18 | 2021-12-01 | SP | 4063 |
313 | | 19 | 2022-01-01 | RJ | 2297 |
314 | | 20 | 2022-02-01 | RJ | 3430 |
315 | | 21 | 2022-03-01 | RJ | 2903 |
316 | | 22 | 2022-04-01 | RJ | 4197 |
317 | | 23 | 2022-05-01 | RJ | 4141 |
318 | | 24 | 2022-06-01 | RJ | 2899 |
319 | | 25 | 2022-09-01 | RJ | 1856 |
320 | | 26 | 2022-10-01 | RJ | 4804 |
321 | | 27 | 2022-11-01 | RJ | 1764 |
322 | | 28 | 2022-12-01 | RJ | 4425 |
323 |
324 |
325 |
326 | Now let's fill the missing slots. The function completes them with
327 | zeros:
328 |
329 | ``` python
330 | df_full = add_missing_slots(df, datetime_col='date', entity_col='city', value_col='sales', freq='MS')
331 | df_full
332 | ```
333 |
334 | 100%|██████████| 3/3 [00:00<00:00, 843.70it/s]
335 |
336 |
337 |
348 |
349 | | | date | city | sales |
350 | |-----|------------|------|-------|
351 | | 0 | 2020-01-01 | FOR | 4944 |
352 | | 1 | 2020-02-01 | FOR | 3435 |
353 | | 2 | 2020-03-01 | FOR | 4543 |
354 | | 3 | 2020-04-01 | FOR | 3879 |
355 | | 4 | 2020-05-01 | FOR | 2601 |
356 | | ... | ... | ... | ... |
357 | | 103 | 2022-08-01 | RJ | 0 |
358 | | 104 | 2022-09-01 | RJ | 1856 |
359 | | 105 | 2022-10-01 | RJ | 4804 |
360 | | 106 | 2022-11-01 | RJ | 1764 |
361 | | 107 | 2022-12-01 | RJ | 4425 |
362 |
363 | 108 rows × 3 columns
364 |
365 |
366 | Let's build a dataset for training a machine learning model to predict
367 | the sales for the next month, for each city, based on the sales from
368 | the previous 3 months.
369 |
370 | ``` python
371 | features, targets = transform_ts_data_into_features_and_target(
372 | df_full,
373 | n_features=3,
374 | datetime_col='date',
375 | entity_col='city',
376 | value_col='sales',
377 | n_targets=1,
378 | step_size=1,
379 | step_name='month'
380 | )
381 | ```
382 |
383 | 100%|██████████| 3/3 [00:00<00:00, 205.58it/s]
384 |
385 | ``` python
386 | pd.concat([features, targets], axis=1)
387 | ```
388 |
389 |
390 |
401 |
402 | | | sales_previous_3_month | sales_previous_2_month | sales_previous_1_month | date | city | target_sales_next_month |
403 | |-----|------------------------|------------------------|------------------------|------------|------|-------------------------|
404 | | 0 | 4944.0 | 3435.0 | 4543.0 | 2020-04-01 | FOR | 3879.0 |
405 | | 1 | 3435.0 | 4543.0 | 3879.0 | 2020-05-01 | FOR | 2601.0 |
406 | | 2 | 4543.0 | 3879.0 | 2601.0 | 2020-06-01 | FOR | 2922.0 |
407 | | 3 | 3879.0 | 2601.0 | 2922.0 | 2020-07-01 | FOR | 4542.0 |
408 | | 4 | 2601.0 | 2922.0 | 4542.0 | 2020-08-01 | FOR | 1338.0 |
409 | | ... | ... | ... | ... | ... | ... | ... |
410 | | 91 | 4197.0 | 4141.0 | 2899.0 | 2022-07-01 | RJ | 0.0 |
411 | | 92 | 4141.0 | 2899.0 | 0.0 | 2022-08-01 | RJ | 0.0 |
412 | | 93 | 2899.0 | 0.0 | 0.0 | 2022-09-01 | RJ | 1856.0 |
413 | | 94 | 0.0 | 0.0 | 1856.0 | 2022-10-01 | RJ | 4804.0 |
414 | | 95 | 0.0 | 1856.0 | 4804.0 | 2022-11-01 | RJ | 1764.0 |
415 |
416 | 96 rows × 6 columns
417 |
418 |
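Before modeling, a time-ordered split keeps future values out of the
training set. A minimal sketch, assuming the concatenated frame from
above (the cutoff date is illustrative):

``` python
Xy_df = pd.concat([features, targets], axis=1)

# everything before the cutoff is for training, the rest for testing
train = Xy_df[Xy_df['date'] < '2022-01-01']
test = Xy_df[Xy_df['date'] >= '2022-01-01']
```
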
419 | # Embedding on Sklearn Pipelines
420 |
421 | ``` python
422 | from sklearn.pipeline import make_pipeline
423 | from sklearn.preprocessing import FunctionTransformer
424 | ```
425 |
426 | ``` python
427 | add_missing_slots_transformer = FunctionTransformer(
428 | add_missing_slots,
429 | kw_args={
430 | 'datetime_col': 'date',
431 | 'entity_col': 'city',
432 | 'value_col': 'sales',
433 | 'freq': 'MS'
434 | }
435 | )
436 |
437 | transform_ts_data_into_features_and_target_transformer = FunctionTransformer(
438 | transform_ts_data_into_features_and_target,
439 | kw_args={
440 | 'n_features': 3,
441 | 'datetime_col': 'date',
442 | 'entity_col': 'city',
443 | 'value_col': 'sales',
444 | 'n_targets': 1,
445 | 'step_size': 1,
446 | 'step_name': 'month',
447 | 'concat_Xy': True
448 | }
449 | )
450 | ```
451 |
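Each `FunctionTransformer` simply forwards its `kw_args` to the wrapped
function, so the first step is equivalent to calling `add_missing_slots`
directly. A quick sketch:

``` python
# same result as add_missing_slots(df, datetime_col='date', entity_col='city', value_col='sales', freq='MS')
df_full_again = add_missing_slots_transformer.fit_transform(df)
```
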
452 | ``` python
453 | ts_data_to_features_and_target_pipeline = make_pipeline(
454 | add_missing_slots_transformer,
455 | transform_ts_data_into_features_and_target_transformer
456 | )
457 | ts_data_to_features_and_target_pipeline
458 | ```
459 |
460 | Pipeline(steps=[('functiontransformer-1',
461 | FunctionTransformer(func=<function add_missing_slots at 0x11f8f49d0>,
462 | kw_args={'datetime_col': 'date',
463 | 'entity_col': 'city',
464 | 'freq': 'MS',
465 | 'value_col': 'sales'})),
466 | ('functiontransformer-2',
467 | FunctionTransformer(func=<function transform_ts_data_into_features_and_target at 0x11f925ca0>,
468 | kw_args={'concat_Xy': True,
469 | 'datetime_col': 'date',
470 | 'entity_col': 'city',
471 | 'n_features': 3, 'n_targets': 1,
472 | 'step_name': 'month',
473 | 'step_size': 1,
474 |                                              'value_col': 'sales'}))])
495 |
496 | ``` python
497 | Xy_df = ts_data_to_features_and_target_pipeline.fit_transform(df)
498 | Xy_df
499 | ```
500 |
501 | 100%|██████████| 3/3 [00:00<00:00, 715.47it/s]
502 | 100%|██████████| 3/3 [00:00<00:00, 184.12it/s]
503 |
504 |
505 |
516 |
517 | | | sales_previous_3_month | sales_previous_2_month | sales_previous_1_month | date | city | target_sales_next_month |
518 | |-----|------------------------|------------------------|------------------------|------------|------|-------------------------|
519 | | 0 | 4944.0 | 3435.0 | 4543.0 | 2020-04-01 | FOR | 3879.0 |
520 | | 1 | 3435.0 | 4543.0 | 3879.0 | 2020-05-01 | FOR | 2601.0 |
521 | | 2 | 4543.0 | 3879.0 | 2601.0 | 2020-06-01 | FOR | 2922.0 |
522 | | 3 | 3879.0 | 2601.0 | 2922.0 | 2020-07-01 | FOR | 4542.0 |
523 | | 4 | 2601.0 | 2922.0 | 4542.0 | 2020-08-01 | FOR | 1338.0 |
524 | | ... | ... | ... | ... | ... | ... | ... |
525 | | 91 | 4197.0 | 4141.0 | 2899.0 | 2022-07-01 | RJ | 0.0 |
526 | | 92 | 4141.0 | 2899.0 | 0.0 | 2022-08-01 | RJ | 0.0 |
527 | | 93 | 2899.0 | 0.0 | 0.0 | 2022-09-01 | RJ | 1856.0 |
528 | | 94 | 0.0 | 0.0 | 1856.0 | 2022-10-01 | RJ | 4804.0 |
529 | | 95 | 0.0 | 1856.0 | 4804.0 | 2022-11-01 | RJ | 1764.0 |
530 |
531 | 96 rows × 6 columns
532 |
533 |
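Both steps are stateless `FunctionTransformer`s, so `fit` is a no-op and
the pipeline simply chains the two functions. That also means it can be
persisted and reloaded like any other scikit-learn estimator; a sketch,
with an illustrative file name:

``` python
import joblib

# illustrative: save and reload the preprocessing pipeline
joblib.dump(ts_data_to_features_and_target_pipeline, 'ts_pipeline.joblib')
pipeline = joblib.load('ts_pipeline.joblib')
Xy_df = pipeline.fit_transform(df)
```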
--------------------------------------------------------------------------------
/nbs/00_core.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "attachments": {},
5 | "cell_type": "markdown",
6 | "metadata": {},
7 | "source": [
8 | "# core\n",
9 | "\n",
10 | "> Core functions"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "#| default_exp core"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": null,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "#| hide\n",
29 | "from nbdev.showdoc import *"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "#| export\n",
39 | "import numpy as np\n",
40 | "import pandas as pd\n",
41 | "from tqdm import tqdm\n",
42 | "from typing import List"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": null,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "#| export\n",
52 | "def add_missing_slots(\n",
53 | " df: pd.DataFrame, # input dataframe with datetime, entity and value columns - time series format\n",
54 | " datetime_col: str, # name of the datetime column\n",
55 | " entity_col: str, # name of the entity column. If a time series is associated to a location, this column will be 'location_id'\n",
56 | " value_col: str, # name of the value column\n",
57 | " freq: str='H', # frequency of the time series. Default is hourly\n",
58 | " fill_value: int = 0 # value to use to fill missing slots\n",
59 | ") -> pd.DataFrame:\n",
60 | " \"\"\"\n",
61 | " Add missing slots to a time series dataframe.\n",
62 | " This function is useful to fill missing slots in a time series dataframe.\n",
63 | " For example, if a time series is associated to a location, this function will add missing slots for each location.\n",
64 | " Missing slots are filled with the value specified in the 'fill_value' parameter.\n",
65 | " By default, the frequency of the time series is hourly.\n",
66 | " \"\"\"\n",
67 | "\n",
68 | " entity_ids = df[entity_col].unique()\n",
69 | " all_hours = pd.date_range(start=df[datetime_col].min(), end=df[datetime_col].max(), freq=freq)\n",
70 | "\n",
71 | " output = pd.DataFrame()\n",
72 | "\n",
73 | " for entity_id in tqdm(entity_ids):\n",
74 | "\n",
75 | " # keep only rides for this 'location_id'\n",
76 | " df_entity_id = df.loc[df[entity_col] == entity_id, [datetime_col, value_col]]\n",
77 | "\n",
78 |     "    # quick way to add missing dates in a Series, filled with `fill_value`\n",
79 |     "    # taken from https://stackoverflow.com/a/19324591\n",
80 |     "    df_entity_id.set_index(datetime_col, inplace=True)\n",
81 |     "    df_entity_id.index = pd.DatetimeIndex(df_entity_id.index)\n",
82 |     "    df_entity_id = df_entity_id.reindex(all_hours, fill_value=fill_value)\n",
83 | "\n",
84 | " # add back 'location_id' column\n",
85 | " df_entity_id[entity_col] = entity_id\n",
86 | "\n",
87 | " output = pd.concat([output, df_entity_id])\n",
88 | "\n",
89 |     "    # move the datetime from the index back to a column\n",
90 | " output = output.reset_index().rename(columns={'index': datetime_col})\n",
91 | " output = output[[datetime_col, entity_col, value_col]].copy()\n",
92 | "\n",
93 | " return output"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {},
100 | "outputs": [
101 | {
102 | "data": {
168 | "text/plain": [
169 | " pickup_hour pickup_location_id rides\n",
170 | "0 2022-01-01 00:00:00 1 2\n",
171 | "1 2022-01-01 01:00:00 1 3\n",
172 | "2 2022-01-01 03:00:00 1 1\n",
173 | "3 2022-01-01 01:00:00 2 1\n",
174 | "4 2022-01-01 02:00:00 2 2\n",
175 | "5 2022-01-01 05:00:00 2 1"
176 | ]
177 | },
178 | "execution_count": null,
179 | "metadata": {},
180 | "output_type": "execute_result"
181 | }
182 | ],
183 | "source": [
184 | "df = pd.DataFrame({\n",
185 | " 'pickup_hour': ['2022-01-01 00:00:00', '2022-01-01 01:00:00', '2022-01-01 03:00:00', '2022-01-01 01:00:00', '2022-01-01 02:00:00', '2022-01-01 05:00:00'],\n",
186 | " 'pickup_location_id': [1, 1, 1, 2, 2, 2],\n",
187 | " 'rides': [2, 3, 1, 1, 2, 1]\n",
188 | "})\n",
189 | "df"
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": null,
195 | "metadata": {},
196 | "outputs": [
197 | {
198 | "name": "stderr",
199 | "output_type": "stream",
200 | "text": [
201 | "100%|██████████| 2/2 [00:00<00:00, 448.83it/s]\n"
202 | ]
203 | },
204 | {
205 | "data": {
307 | "text/plain": [
308 | " pickup_hour pickup_location_id rides\n",
309 | "0 2022-01-01 00:00:00 1 2\n",
310 | "1 2022-01-01 01:00:00 1 3\n",
311 | "2 2022-01-01 02:00:00 1 0\n",
312 | "3 2022-01-01 03:00:00 1 1\n",
313 | "4 2022-01-01 04:00:00 1 0\n",
314 | "5 2022-01-01 05:00:00 1 0\n",
315 | "6 2022-01-01 00:00:00 2 0\n",
316 | "7 2022-01-01 01:00:00 2 1\n",
317 | "8 2022-01-01 02:00:00 2 2\n",
318 | "9 2022-01-01 03:00:00 2 0\n",
319 | "10 2022-01-01 04:00:00 2 0\n",
320 | "11 2022-01-01 05:00:00 2 1"
321 | ]
322 | },
323 | "execution_count": null,
324 | "metadata": {},
325 | "output_type": "execute_result"
326 | }
327 | ],
328 | "source": [
329 | "add_missing_slots(df, datetime_col='pickup_hour', entity_col='pickup_location_id', value_col='rides', freq='H')"
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": null,
335 | "metadata": {},
336 | "outputs": [],
337 | "source": [
338 | "#| export\n",
339 | "def get_cutoff_indices_features_and_target(\n",
340 | " ts_data: pd.DataFrame, # Time Series DataFrame\n",
341 | " datetime_col: str, # Name of the datetime column\n",
342 | " n_features: int, # Number of features to use for the prediction\n",
343 | " n_targets: int=1, # Number of target values to predict\n",
344 | " step_size: int=1, # Step size to use to slide the Time Series DataFrame\n",
345 | ") -> List[tuple]:\n",
346 | " \"\"\"Function to get the indices for the cutoffs of a Time Series DataFrame.\n",
347 |     "    The Time Series DataFrame should be ordered by time.\"\"\"\n",
348 | "\n",
349 | " # check if it is ordered\n",
350 | " if not ts_data[datetime_col].is_monotonic_increasing:\n",
351 | " raise ValueError(\"The DataFrame should be ordered by time.\")\n",
352 | " \n",
353 | " # check if the index is ordered\n",
354 | " if not ts_data.index.is_monotonic_increasing:\n",
355 | " raise ValueError(\"The index should be ordered by time.\")\n",
356 | " \n",
357 | " # check if the step_size is valid\n",
358 | " if step_size < 1:\n",
359 | " raise ValueError(\"The step_size should be >= 1.\")\n",
360 | " \n",
361 | " # check if the number of features is valid\n",
362 | " if n_features < 1:\n",
363 | " raise ValueError(\"The number of features should be >= 1.\")\n",
364 | " \n",
365 | " # check if the number of target values is valid\n",
366 | " if n_targets < 1:\n",
367 | " raise ValueError(\"The number of target values should be >= 1.\")\n",
368 | " \n",
369 | " # check if the number of features and target values is valid\n",
370 | " if n_features + n_targets > len(ts_data):\n",
371 | " raise ValueError(\"The number of features + target values should be <= the length of the time series.\")\n",
372 | " \n",
373 | " # below we compute the indices for the cutoffs\n",
374 | " stop_position = len(ts_data) - 1\n",
375 | "\n",
376 | " # Start the first sub-sequence at index position 0\n",
377 | " subseq_first_idx = 0\n",
378 | " subseq_mid_idx = n_features\n",
379 | " subseq_last_idx = n_features + n_targets\n",
380 | " indices = []\n",
381 | "\n",
382 | " while subseq_last_idx <= stop_position:\n",
383 | " indices.append((subseq_first_idx, subseq_mid_idx, subseq_last_idx))\n",
384 | "\n",
385 | " subseq_first_idx += step_size\n",
386 | " subseq_mid_idx += step_size\n",
387 | " subseq_last_idx += step_size\n",
388 | "\n",
389 | " return indices"
390 | ]
391 | },
392 | {
393 | "cell_type": "code",
394 | "execution_count": null,
395 | "metadata": {},
396 | "outputs": [
397 | {
398 | "data": {
477 | "text/plain": [
478 | " pickup_hour rides\n",
479 | "0 2022-01-01 01:00:00 2\n",
480 | "1 2022-01-01 00:00:00 3\n",
481 | "2 2022-01-01 03:00:00 1\n",
482 | "3 2022-01-01 04:00:00 1\n",
483 | "4 2022-01-01 02:00:00 2\n",
484 | "5 2022-01-01 05:00:00 1\n",
485 | "6 2022-01-01 09:00:00 1\n",
486 | "7 2022-01-01 06:00:00 2\n",
487 | "8 2022-01-01 07:00:00 1\n",
488 | "9 2022-01-01 08:00:00 1"
489 | ]
490 | },
491 | "execution_count": null,
492 | "metadata": {},
493 | "output_type": "execute_result"
494 | }
495 | ],
496 | "source": [
497 | "# build a time series dataframe with 10 hours of data in random order\n",
498 | "ts_data = pd.DataFrame({\n",
499 | " 'pickup_hour': ['2022-01-01 01:00:00', '2022-01-01 00:00:00', '2022-01-01 03:00:00', '2022-01-01 04:00:00', '2022-01-01 02:00:00', '2022-01-01 05:00:00', '2022-01-01 09:00:00', '2022-01-01 06:00:00', '2022-01-01 07:00:00', '2022-01-01 08:00:00'],\n",
500 | " 'rides': [2, 3, 1, 1, 2, 1, 1, 2, 1, 1]\n",
501 | "})\n",
502 | "ts_data"
503 | ]
504 | },
505 | {
506 | "cell_type": "code",
507 | "execution_count": null,
508 | "metadata": {},
509 | "outputs": [
510 | {
511 | "data": {
512 | "text/plain": [
513 | "[(0, 3, 5), (1, 4, 6), (2, 5, 7), (3, 6, 8), (4, 7, 9)]"
514 | ]
515 | },
516 | "execution_count": null,
517 | "metadata": {},
518 | "output_type": "execute_result"
519 | }
520 | ],
521 | "source": [
522 | "# the time series should be ordered by time, otherwise it will not work and throw a ValueError\n",
523 | "ts_data.sort_values(by='pickup_hour', inplace=True, ignore_index=True)\n",
524 | "cutoff_idxs = get_cutoff_indices_features_and_target(ts_data, datetime_col='pickup_hour', n_features=3, n_targets=2, step_size=1)\n",
525 | "cutoff_idxs"
526 | ]
527 | },
528 | {
529 | "cell_type": "code",
530 | "execution_count": null,
531 | "metadata": {},
532 | "outputs": [],
533 | "source": [
534 | "assert cutoff_idxs == [(0, 3, 5), (1, 4, 6), (2, 5, 7), (3, 6, 8), (4, 7, 9)]"
535 | ]
536 | },
537 | {
538 | "cell_type": "code",
539 | "execution_count": null,
540 | "metadata": {},
541 | "outputs": [],
542 | "source": [
543 | "#| export\n",
544 | "def transform_ts_data_into_features_and_target(\n",
545 | " ts_data: pd.DataFrame, # Time Series DataFrame\n",
546 | " n_features: int, # Number of features to use for the prediction\n",
547 | " datetime_col: str, # Name of the datetime column\n",
548 | " entity_col: str, # Name of the entity column, e.g. location_id\n",
549 | " value_col: str, # Name of the value column\n",
550 | " n_targets: int=1, # Number of target values to predict\n",
551 | " step_size: int=1, # Step size to use to slide the Time Series DataFrame\n",
552 | " step_name: str=None, # Name of the step column\n",
553 | " concat_Xy: bool=False # Whether to concat X and y on the same dataframe or not\n",
554 | ") -> pd.DataFrame:\n",
555 | " \"\"\"\n",
556 | " Slices and transposes data from time-series format into a (features, target)\n",
557 | " format that we can use to train Supervised ML models.\n",
558 | " \"\"\"\n",
559 | "\n",
560 | " entity_ids = ts_data[entity_col].unique()\n",
561 | " features = pd.DataFrame()\n",
562 | " targets = pd.DataFrame()\n",
563 | " \n",
564 | " for entity_id in tqdm(entity_ids):\n",
565 | " \n",
566 | " # keep only ts data for this `location_id`\n",
567 | " ts_data_one_location = ts_data.loc[\n",
568 | " ts_data[entity_col] == entity_id, \n",
569 | " [datetime_col, value_col]\n",
570 | " ]\n",
571 | "\n",
572 | " # pre-compute cutoff indices to split dataframe rows\n",
573 | " indices = get_cutoff_indices_features_and_target(\n",
574 | " ts_data=ts_data_one_location,\n",
575 | " datetime_col=datetime_col,\n",
576 | " n_features=n_features,\n",
577 | " n_targets=n_targets,\n",
578 | " step_size=step_size\n",
579 | " )\n",
580 | "\n",
581 | " # slice and transpose data into numpy arrays for features and targets\n",
582 | " time_values = []\n",
583 | " n_examples = len(indices)\n",
584 | " x = np.ndarray(shape=(n_examples, n_features), dtype=np.float32)\n",
585 | " if n_targets == 1:\n",
586 | " y = np.ndarray(shape=(n_examples), dtype=np.float32)\n",
587 | " for i, idx in enumerate(indices):\n",
588 | " x[i, :] = ts_data_one_location.iloc[idx[0]:idx[1]][value_col].values\n",
589 | " y[i] = ts_data_one_location.iloc[idx[1]:idx[2]][value_col].values\n",
590 | " time_values.append(ts_data_one_location.iloc[idx[1]][datetime_col])\n",
591 | " else:\n",
592 | " y = np.ndarray(shape=(n_examples, n_targets), dtype=np.float32)\n",
593 | " for i, idx in enumerate(indices):\n",
594 | " x[i, :] = ts_data_one_location.iloc[idx[0]:idx[1]][value_col].values\n",
595 | " y[i, :] = ts_data_one_location.iloc[idx[1]:idx[2]][value_col].values\n",
596 | " time_values.append(ts_data_one_location.iloc[idx[1]][datetime_col])\n",
597 | "\n",
598 | " # numpy -> pandas\n",
599 | " if step_name is None:\n",
600 | " features_one_location = pd.DataFrame(\n",
601 | " x,\n",
602 | " columns=[f'{value_col}_previous_{i+1}' for i in reversed(range(n_features))]\n",
603 | " )\n",
604 | " features_one_location[datetime_col] = time_values\n",
605 | " features_one_location[entity_col] = entity_id\n",
606 | "\n",
607 | " # numpy -> pandas\n",
608 | " if n_targets == 1:\n",
609 | " targets_one_location = pd.DataFrame(y, columns=[f'target_{value_col}_next'])\n",
610 | " else:\n",
611 | " targets_one_location = pd.DataFrame(\n",
612 | " y,\n",
613 | " columns=[f'target_{value_col}_next_{i+1}' for i in range(n_targets)]\n",
614 | " )\n",
615 | " else:\n",
616 | " features_one_location = pd.DataFrame(\n",
617 | " x,\n",
618 | " columns=[f'{value_col}_previous_{i+1}_{step_name}' for i in reversed(range(n_features))]\n",
619 | " )\n",
620 | " features_one_location[datetime_col] = time_values\n",
621 | " features_one_location[entity_col] = entity_id\n",
622 | "\n",
623 | " # numpy -> pandas\n",
624 | " if n_targets == 1:\n",
625 | " targets_one_location = pd.DataFrame(y, columns=[f'target_{value_col}_next_{step_name}'])\n",
626 | " else:\n",
627 | " targets_one_location = pd.DataFrame(\n",
628 | " y,\n",
629 | " columns=[f'target_{value_col}_next_{i+1}_{step_name}' for i in range(n_targets)]\n",
630 | " )\n",
631 | " \n",
632 | " # concatenate results\n",
633 | " features = pd.concat([features, features_one_location])\n",
634 | " targets = pd.concat([targets, targets_one_location])\n",
635 | "\n",
636 | " features.reset_index(inplace=True, drop=True)\n",
637 | " targets.reset_index(inplace=True, drop=True)\n",
638 | "\n",
639 | " if concat_Xy:\n",
640 | " return pd.concat([features, targets], axis=1)\n",
641 | " else:\n",
642 | " return features, targets"
643 | ]
644 | },
645 | {
646 | "cell_type": "code",
647 | "execution_count": null,
648 | "metadata": {},
649 | "outputs": [
650 | {
651 | "data": {
741 | "text/plain": [
742 | " pickup_hour location_id rides\n",
743 | "0 2022-01-01 01:00:00 1 2\n",
744 | "1 2022-01-01 00:00:00 1 3\n",
745 | "2 2022-01-01 03:00:00 1 1\n",
746 | "3 2022-01-01 04:00:00 1 1\n",
747 | "4 2022-01-01 02:00:00 1 2\n",
748 | "5 2022-01-01 05:00:00 1 1\n",
749 | "6 2022-01-01 09:00:00 2 1\n",
750 | "7 2022-01-01 06:00:00 2 2\n",
751 | "8 2022-01-01 07:00:00 2 1\n",
752 | "9 2022-01-01 08:00:00 2 1"
753 | ]
754 | },
755 | "execution_count": null,
756 | "metadata": {},
757 | "output_type": "execute_result"
758 | }
759 | ],
760 | "source": [
761 | "# build a time series dataframe with 10 hours of data in random order and a location id column with 1 and 2\n",
762 | "ts_data = pd.DataFrame({\n",
763 | " 'pickup_hour': ['2022-01-01 01:00:00', '2022-01-01 00:00:00', '2022-01-01 03:00:00', '2022-01-01 04:00:00', '2022-01-01 02:00:00', '2022-01-01 05:00:00', '2022-01-01 09:00:00', '2022-01-01 06:00:00', '2022-01-01 07:00:00', '2022-01-01 08:00:00'],\n",
764 | " 'location_id': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],\n",
765 | " 'rides': [2, 3, 1, 1, 2, 1, 1, 2, 1, 1]\n",
766 | "})\n",
767 | "ts_data"
768 | ]
769 | },
770 | {
771 | "cell_type": "code",
772 | "execution_count": null,
773 | "metadata": {},
774 | "outputs": [
775 | {
776 | "name": "stderr",
777 | "output_type": "stream",
778 | "text": [
779 | "100%|██████████| 2/2 [00:00<00:00, 708.92it/s]\n"
780 | ]
781 | },
782 | {
783 | "data": {
933 | "text/plain": [
934 | " pickup_hour location_id rides\n",
935 | "0 2022-01-01 00:00:00 1 3\n",
936 | "1 2022-01-01 01:00:00 1 2\n",
937 | "2 2022-01-01 02:00:00 1 2\n",
938 | "3 2022-01-01 03:00:00 1 1\n",
939 | "4 2022-01-01 04:00:00 1 1\n",
940 | "5 2022-01-01 05:00:00 1 1\n",
941 | "6 2022-01-01 06:00:00 1 0\n",
942 | "7 2022-01-01 07:00:00 1 0\n",
943 | "8 2022-01-01 08:00:00 1 0\n",
944 | "9 2022-01-01 09:00:00 1 0\n",
945 | "10 2022-01-01 00:00:00 2 0\n",
946 | "11 2022-01-01 01:00:00 2 0\n",
947 | "12 2022-01-01 02:00:00 2 0\n",
948 | "13 2022-01-01 03:00:00 2 0\n",
949 | "14 2022-01-01 04:00:00 2 0\n",
950 | "15 2022-01-01 05:00:00 2 0\n",
951 | "16 2022-01-01 06:00:00 2 2\n",
952 | "17 2022-01-01 07:00:00 2 1\n",
953 | "18 2022-01-01 08:00:00 2 1\n",
954 | "19 2022-01-01 09:00:00 2 1"
955 | ]
956 | },
957 | "execution_count": null,
958 | "metadata": {},
959 | "output_type": "execute_result"
960 | }
961 | ],
962 | "source": [
963 | "ts_data = add_missing_slots(ts_data, datetime_col='pickup_hour', entity_col='location_id', value_col='rides', freq='1H')\n",
964 | "ts_data"
965 | ]
966 | },
967 | {
968 | "cell_type": "code",
969 | "execution_count": null,
970 | "metadata": {},
971 | "outputs": [
972 | {
973 | "name": "stderr",
974 | "output_type": "stream",
975 | "text": [
976 | "100%|██████████| 2/2 [00:00<00:00, 371.60it/s]\n"
977 | ]
978 | }
979 | ],
980 | "source": [
981 | "features, targets = transform_ts_data_into_features_and_target(\n",
982 | " ts_data=ts_data,\n",
983 | " n_features=3,\n",
984 | " datetime_col='pickup_hour',\n",
985 | " entity_col='location_id',\n",
986 | " value_col='rides',\n",
987 | " n_targets=2,\n",
988 | " step_size=1\n",
989 | ")"
990 | ]
991 | },
992 | {
993 | "cell_type": "code",
994 | "execution_count": null,
995 | "metadata": {},
996 | "outputs": [
997 | {
998 | "data": {
1110 | "text/plain": [
1111 | " rides_previous_3 rides_previous_2 rides_previous_1 pickup_hour \\\n",
1112 | "0 3.0 2.0 2.0 2022-01-01 03:00:00 \n",
1113 | "1 2.0 2.0 1.0 2022-01-01 04:00:00 \n",
1114 | "2 2.0 1.0 1.0 2022-01-01 05:00:00 \n",
1115 | "3 1.0 1.0 1.0 2022-01-01 06:00:00 \n",
1116 | "4 1.0 1.0 0.0 2022-01-01 07:00:00 \n",
1117 | "5 0.0 0.0 0.0 2022-01-01 03:00:00 \n",
1118 | "6 0.0 0.0 0.0 2022-01-01 04:00:00 \n",
1119 | "7 0.0 0.0 0.0 2022-01-01 05:00:00 \n",
1120 | "8 0.0 0.0 0.0 2022-01-01 06:00:00 \n",
1121 | "9 0.0 0.0 2.0 2022-01-01 07:00:00 \n",
1122 | "\n",
1123 | " location_id \n",
1124 | "0 1 \n",
1125 | "1 1 \n",
1126 | "2 1 \n",
1127 | "3 1 \n",
1128 | "4 1 \n",
1129 | "5 2 \n",
1130 | "6 2 \n",
1131 | "7 2 \n",
1132 | "8 2 \n",
1133 | "9 2 "
1134 | ]
1135 | },
1136 | "execution_count": null,
1137 | "metadata": {},
1138 | "output_type": "execute_result"
1139 | }
1140 | ],
1141 | "source": [
1142 | "features"
1143 | ]
1144 | },
1145 | {
1146 | "cell_type": "code",
1147 | "execution_count": null,
1148 | "metadata": {},
1149 | "outputs": [
1150 | {
1151 | "data": {
1230 | "text/plain": [
1231 | " target_rides_next_1 target_rides_next_2\n",
1232 | "0 1.0 1.0\n",
1233 | "1 1.0 1.0\n",
1234 | "2 1.0 0.0\n",
1235 | "3 0.0 0.0\n",
1236 | "4 0.0 0.0\n",
1237 | "5 0.0 0.0\n",
1238 | "6 0.0 0.0\n",
1239 | "7 0.0 2.0\n",
1240 | "8 2.0 1.0\n",
1241 | "9 1.0 1.0"
1242 | ]
1243 | },
1244 | "execution_count": null,
1245 | "metadata": {},
1246 | "output_type": "execute_result"
1247 | }
1248 | ],
1249 | "source": [
1250 | "targets"
1251 | ]
1252 | },
1253 | {
1254 | "cell_type": "code",
1255 | "execution_count": null,
1256 | "metadata": {},
1257 | "outputs": [
1258 | {
1259 | "data": {
1393 | "text/plain": [
1394 | " rides_previous_3 rides_previous_2 rides_previous_1 pickup_hour \\\n",
1395 | "0 3.0 2.0 2.0 2022-01-01 03:00:00 \n",
1396 | "1 2.0 2.0 1.0 2022-01-01 04:00:00 \n",
1397 | "2 2.0 1.0 1.0 2022-01-01 05:00:00 \n",
1398 | "3 1.0 1.0 1.0 2022-01-01 06:00:00 \n",
1399 | "4 1.0 1.0 0.0 2022-01-01 07:00:00 \n",
1400 | "5 0.0 0.0 0.0 2022-01-01 03:00:00 \n",
1401 | "6 0.0 0.0 0.0 2022-01-01 04:00:00 \n",
1402 | "7 0.0 0.0 0.0 2022-01-01 05:00:00 \n",
1403 | "8 0.0 0.0 0.0 2022-01-01 06:00:00 \n",
1404 | "9 0.0 0.0 2.0 2022-01-01 07:00:00 \n",
1405 | "\n",
1406 | " location_id target_rides_next_1 target_rides_next_2 \n",
1407 | "0 1 1.0 1.0 \n",
1408 | "1 1 1.0 1.0 \n",
1409 | "2 1 1.0 0.0 \n",
1410 | "3 1 0.0 0.0 \n",
1411 | "4 1 0.0 0.0 \n",
1412 | "5 2 0.0 0.0 \n",
1413 | "6 2 0.0 0.0 \n",
1414 | "7 2 0.0 2.0 \n",
1415 | "8 2 2.0 1.0 \n",
1416 | "9 2 1.0 1.0 "
1417 | ]
1418 | },
1419 | "execution_count": null,
1420 | "metadata": {},
1421 | "output_type": "execute_result"
1422 | }
1423 | ],
1424 | "source": [
1425 | "pd.concat([features, targets], axis=1)"
1426 | ]
1427 | },
1428 | {
1429 | "attachments": {},
1430 | "cell_type": "markdown",
1431 | "metadata": {},
1432 | "source": [
1433 |     "This dataset can be used to predict the rides for the next 2 hours, for each location_id, using the historical rides from the previous 3 hours."
1434 | ]
1435 | },
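1436 |   {
1437 |    "attachments": {},
1438 |    "cell_type": "markdown",
1439 |    "metadata": {},
1440 |    "source": [
1441 |     "For instance, a minimal sketch of fitting a model on this dataset (assuming scikit-learn, which is already a project dependency): the datetime and entity columns are dropped so that only the numeric lag features remain, and `LinearRegression` supports the two-column target natively."
1442 |    ]
1443 |   },
1444 |   {
1445 |    "cell_type": "code",
1446 |    "execution_count": null,
1447 |    "metadata": {},
1448 |    "outputs": [],
1449 |    "source": [
1450 |     "from sklearn.linear_model import LinearRegression\n",
1451 |     "\n",
1452 |     "# keep only the numeric lag features\n",
1453 |     "X = features.drop(columns=['pickup_hour', 'location_id'])\n",
1454 |     "\n",
1455 |     "# one linear model predicting both horizons at once\n",
1456 |     "model = LinearRegression().fit(X, targets)\n",
1457 |     "model.predict(X)"
1458 |    ]
1459 |   },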
1436 | {
1437 | "cell_type": "code",
1438 | "execution_count": null,
1439 | "metadata": {},
1440 | "outputs": [],
1441 | "source": [
1442 | "#| hide\n",
1443 | "import nbdev; nbdev.nbdev_export()"
1444 | ]
1445 | }
1446 | ],
1447 | "metadata": {
1448 | "kernelspec": {
1449 | "display_name": "python3",
1450 | "language": "python",
1451 | "name": "python3"
1452 | }
1453 | },
1454 | "nbformat": 4,
1455 | "nbformat_minor": 4
1456 | }
1457 |
--------------------------------------------------------------------------------
/nbs/_quarto.yml:
--------------------------------------------------------------------------------
1 | project:
2 | type: website
3 |
4 | format:
5 | html:
6 | theme: cosmo
7 | css: styles.css
8 | toc: true
9 |
10 | website:
11 | twitter-card: true
12 | open-graph: true
13 | repo-actions: [issue]
14 | navbar:
15 | background: primary
16 | search: true
17 | sidebar:
18 | style: floating
19 |
20 | metadata-files: [nbdev.yml, sidebar.yml]
--------------------------------------------------------------------------------
/nbs/index.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "attachments": {},
5 | "cell_type": "markdown",
6 | "metadata": {},
7 | "source": [
8 | "# ts2ml\n",
9 | "\n",
10 | "> Tools to Transform a Time Series into Features and Target Dataset"
11 | ]
12 | },
13 | {
14 | "attachments": {},
15 | "cell_type": "markdown",
16 | "metadata": {},
17 | "source": [
18 | "## Install"
19 | ]
20 | },
21 | {
22 | "attachments": {},
23 | "cell_type": "markdown",
24 | "metadata": {},
25 | "source": [
26 | "```sh\n",
27 | "pip install ts2ml\n",
28 | "```"
29 | ]
30 | },
31 | {
32 | "attachments": {},
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "## How to use"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "import pandas as pd\n",
46 | "from ts2ml.core import add_missing_slots\n",
47 | "from ts2ml.core import transform_ts_data_into_features_and_target"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {},
54 | "outputs": [
55 | {
56 | "data": {
122 | "text/plain": [
123 | " pickup_hour pickup_location_id rides\n",
124 | "0 2022-01-01 00:00:00 1 2\n",
125 | "1 2022-01-01 01:00:00 1 3\n",
126 | "2 2022-01-01 03:00:00 1 1\n",
127 | "3 2022-01-01 01:00:00 2 1\n",
128 | "4 2022-01-01 02:00:00 2 2\n",
129 | "5 2022-01-01 05:00:00 2 1"
130 | ]
131 | },
132 | "execution_count": null,
133 | "metadata": {},
134 | "output_type": "execute_result"
135 | }
136 | ],
137 | "source": [
138 | "df = pd.DataFrame({\n",
139 | " 'pickup_hour': ['2022-01-01 00:00:00', '2022-01-01 01:00:00', '2022-01-01 03:00:00', '2022-01-01 01:00:00', '2022-01-01 02:00:00', '2022-01-01 05:00:00'],\n",
140 | " 'pickup_location_id': [1, 1, 1, 2, 2, 2],\n",
141 | " 'rides': [2, 3, 1, 1, 2, 1]\n",
142 | "})\n",
143 | "df"
144 | ]
145 | },
146 | {
147 | "attachments": {},
148 | "cell_type": "markdown",
149 | "metadata": {},
150 | "source": [
151 | "Let's fill the missing slots with zeros"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": null,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "name": "stderr",
161 | "output_type": "stream",
162 | "text": [
163 | "100%|██████████| 2/2 [00:00<00:00, 907.86it/s]\n"
164 | ]
165 | },
166 | {
167 | "data": {
269 | "text/plain": [
270 | " pickup_hour pickup_location_id rides\n",
271 | "0 2022-01-01 00:00:00 1 2\n",
272 | "1 2022-01-01 01:00:00 1 3\n",
273 | "2 2022-01-01 02:00:00 1 0\n",
274 | "3 2022-01-01 03:00:00 1 1\n",
275 | "4 2022-01-01 04:00:00 1 0\n",
276 | "5 2022-01-01 05:00:00 1 0\n",
277 | "6 2022-01-01 00:00:00 2 0\n",
278 | "7 2022-01-01 01:00:00 2 1\n",
279 | "8 2022-01-01 02:00:00 2 2\n",
280 | "9 2022-01-01 03:00:00 2 0\n",
281 | "10 2022-01-01 04:00:00 2 0\n",
282 | "11 2022-01-01 05:00:00 2 1"
283 | ]
284 | },
285 | "execution_count": null,
286 | "metadata": {},
287 | "output_type": "execute_result"
288 | }
289 | ],
290 | "source": [
291 | "df = add_missing_slots(df, datetime_col='pickup_hour', entity_col='pickup_location_id', value_col='rides', freq='H')\n",
292 | "df"
293 | ]
294 | },
295 | {
296 | "attachments": {},
297 | "cell_type": "markdown",
298 | "metadata": {},
299 | "source": [
300 |     "Now, let's build features and targets to predict the number of rides for the next hour, for each location_id, using the number of rides from the previous 3 hours:"
301 | ]
302 | },
303 | {
304 | "cell_type": "code",
305 | "execution_count": null,
306 | "metadata": {},
307 | "outputs": [
308 | {
309 | "name": "stderr",
310 | "output_type": "stream",
311 | "text": [
312 | "100%|██████████| 2/2 [00:00<00:00, 597.86it/s]\n"
313 | ]
314 | }
315 | ],
316 | "source": [
317 | "features, targets = transform_ts_data_into_features_and_target(\n",
318 | " df,\n",
319 | " n_features=3,\n",
320 | " datetime_col='pickup_hour', \n",
321 | " entity_col='pickup_location_id', \n",
322 | " value_col='rides',\n",
323 | " n_targets=1,\n",
324 | " step_size=1,\n",
325 | " step_name='hour'\n",
326 | ")"
327 | ]
328 | },
329 | {
330 | "cell_type": "code",
331 | "execution_count": null,
332 | "metadata": {},
333 | "outputs": [
334 | {
335 | "data": {
399 | "text/plain": [
400 | " rides_previous_3_hour rides_previous_2_hour rides_previous_1_hour \\\n",
401 | "0 2.0 3.0 0.0 \n",
402 | "1 3.0 0.0 1.0 \n",
403 | "2 0.0 1.0 2.0 \n",
404 | "3 1.0 2.0 0.0 \n",
405 | "\n",
406 | " pickup_hour pickup_location_id \n",
407 | "0 2022-01-01 03:00:00 1 \n",
408 | "1 2022-01-01 04:00:00 1 \n",
409 | "2 2022-01-01 03:00:00 2 \n",
410 | "3 2022-01-01 04:00:00 2 "
411 | ]
412 | },
413 | "execution_count": null,
414 | "metadata": {},
415 | "output_type": "execute_result"
416 | }
417 | ],
418 | "source": [
419 | "features"
420 | ]
421 | },
422 | {
423 | "cell_type": "code",
424 | "execution_count": null,
425 | "metadata": {},
426 | "outputs": [
427 | {
428 | "data": {
472 | "text/plain": [
473 | " target_rides_next_hour\n",
474 | "0 1.0\n",
475 | "1 0.0\n",
476 | "2 0.0\n",
477 | "3 0.0"
478 | ]
479 | },
480 | "execution_count": null,
481 | "metadata": {},
482 | "output_type": "execute_result"
483 | }
484 | ],
485 | "source": [
486 | "targets"
487 | ]
488 | },
489 | {
490 | "cell_type": "code",
491 | "execution_count": null,
492 | "metadata": {},
493 | "outputs": [
494 | {
495 | "data": {
564 | "text/plain": [
565 | " rides_previous_3_hour rides_previous_2_hour rides_previous_1_hour \\\n",
566 | "0 2.0 3.0 0.0 \n",
567 | "1 3.0 0.0 1.0 \n",
568 | "2 0.0 1.0 2.0 \n",
569 | "3 1.0 2.0 0.0 \n",
570 | "\n",
571 | " pickup_hour pickup_location_id target_rides_next_hour \n",
572 | "0 2022-01-01 03:00:00 1 1.0 \n",
573 | "1 2022-01-01 04:00:00 1 0.0 \n",
574 | "2 2022-01-01 03:00:00 2 0.0 \n",
575 | "3 2022-01-01 04:00:00 2 0.0 "
576 | ]
577 | },
578 | "execution_count": null,
579 | "metadata": {},
580 | "output_type": "execute_result"
581 | }
582 | ],
583 | "source": [
584 | "Xy_df = pd.concat([features, targets], axis=1)\n",
585 | "Xy_df"
586 | ]
587 | },
588 | {
589 | "attachments": {},
590 | "cell_type": "markdown",
591 | "metadata": {},
592 | "source": [
593 | "# Another Example\n",
594 |     "Monthly-spaced time series"
595 | ]
596 | },
597 | {
598 | "cell_type": "code",
599 | "execution_count": null,
600 | "metadata": {},
601 | "outputs": [
602 | {
603 | "data": {
849 | "text/plain": [
850 | " date city sales\n",
851 | "0 2020-01-01 FOR 4944\n",
852 | "1 2020-02-01 FOR 3435\n",
853 | "2 2020-03-01 FOR 4543\n",
854 | "3 2020-04-01 FOR 3879\n",
855 | "4 2020-05-01 FOR 2601\n",
856 | "5 2020-06-01 FOR 2922\n",
857 | "6 2020-07-01 FOR 4542\n",
858 | "7 2020-08-01 FOR 1338\n",
859 | "8 2020-09-01 FOR 2938\n",
860 | "9 2020-10-01 FOR 2695\n",
861 | "10 2020-11-01 FOR 4065\n",
862 | "11 2020-12-01 FOR 3864\n",
863 | "12 2021-01-01 SP 2652\n",
864 | "13 2021-02-01 SP 2137\n",
865 | "14 2021-03-01 SP 2663\n",
866 | "15 2021-04-01 SP 1168\n",
867 | "16 2021-05-01 SP 4523\n",
868 | "17 2021-06-01 SP 4135\n",
869 | "18 2021-07-01 SP 3566\n",
870 | "19 2021-08-01 SP 2121\n",
871 | "20 2021-09-01 SP 1070\n",
872 | "21 2021-10-01 SP 1624\n",
873 | "22 2021-11-01 SP 3034\n",
874 | "23 2021-12-01 SP 4063\n",
875 | "24 2022-01-01 RJ 2297\n",
876 | "25 2022-02-01 RJ 3430\n",
877 | "26 2022-03-01 RJ 2903\n",
878 | "27 2022-04-01 RJ 4197\n",
879 | "28 2022-05-01 RJ 4141\n",
880 | "29 2022-06-01 RJ 2899\n",
881 | "30 2022-07-01 RJ 4529\n",
882 | "31 2022-08-01 RJ 3612\n",
883 | "32 2022-09-01 RJ 1856\n",
884 | "33 2022-10-01 RJ 4804\n",
885 | "34 2022-11-01 RJ 1764\n",
886 | "35 2022-12-01 RJ 4425"
887 | ]
888 | },
889 | "execution_count": null,
890 | "metadata": {},
891 | "output_type": "execute_result"
892 | }
893 | ],
894 | "source": [
895 | "import pandas as pd\n",
896 | "import numpy as np\n",
897 | "\n",
898 | "# Generate timestamp index with monthly frequency\n",
899 | "date_rng = pd.date_range(start='1/1/2020', end='12/1/2022', freq='MS')\n",
900 | "\n",
901 | "# Create list of city codes\n",
902 | "cities = ['FOR', 'SP', 'RJ']\n",
903 | "\n",
904 |     "# Create dataframe with random monthly sales data; each city covers one year\n",
905 | "df = pd.DataFrame({\n",
906 | " 'date': date_rng,\n",
907 | " 'city': np.repeat(cities, len(date_rng)//len(cities)),\n",
908 | " 'sales': np.random.randint(1000, 5000, size=len(date_rng))\n",
909 | "})\n",
910 | "df"
911 | ]
912 | },
913 | {
914 | "attachments": {},
915 | "cell_type": "markdown",
916 | "metadata": {},
917 | "source": [
918 |     "FOR only has data for the year 2020, SP only for 2021, and RJ only for 2022. Let's also simulate additional missing slots by randomly dropping some rows."
919 | ]
920 | },
921 | {
922 | "cell_type": "code",
923 | "execution_count": null,
924 | "metadata": {},
925 | "outputs": [
926 | {
927 | "data": {
1131 | "text/plain": [
1132 | " date city sales\n",
1133 | "0 2020-01-01 FOR 4944\n",
1134 | "1 2020-02-01 FOR 3435\n",
1135 | "2 2020-03-01 FOR 4543\n",
1136 | "3 2020-04-01 FOR 3879\n",
1137 | "4 2020-05-01 FOR 2601\n",
1138 | "5 2020-06-01 FOR 2922\n",
1139 | "6 2020-07-01 FOR 4542\n",
1140 | "7 2020-08-01 FOR 1338\n",
1141 | "8 2020-09-01 FOR 2938\n",
1142 | "9 2020-11-01 FOR 4065\n",
1143 | "10 2020-12-01 FOR 3864\n",
1144 | "11 2021-01-01 SP 2652\n",
1145 | "12 2021-02-01 SP 2137\n",
1146 | "13 2021-03-01 SP 2663\n",
1147 | "14 2021-07-01 SP 3566\n",
1148 | "15 2021-08-01 SP 2121\n",
1149 | "16 2021-10-01 SP 1624\n",
1150 | "17 2021-11-01 SP 3034\n",
1151 | "18 2021-12-01 SP 4063\n",
1152 | "19 2022-01-01 RJ 2297\n",
1153 | "20 2022-02-01 RJ 3430\n",
1154 | "21 2022-03-01 RJ 2903\n",
1155 | "22 2022-04-01 RJ 4197\n",
1156 | "23 2022-05-01 RJ 4141\n",
1157 | "24 2022-06-01 RJ 2899\n",
1158 | "25 2022-09-01 RJ 1856\n",
1159 | "26 2022-10-01 RJ 4804\n",
1160 | "27 2022-11-01 RJ 1764\n",
1161 | "28 2022-12-01 RJ 4425"
1162 | ]
1163 | },
1164 | "execution_count": null,
1165 | "metadata": {},
1166 | "output_type": "execute_result"
1167 | }
1168 | ],
1169 | "source": [
1170 | "# Generate random indices to drop\n",
1171 | "drop_indices = np.random.choice(df.index, size=int(len(df)*0.2), replace=False)\n",
1172 | "\n",
1173 | "# Drop selected rows from dataframe\n",
1174 | "df = df.drop(drop_indices)\n",
1175 | "df.reset_index(drop=True, inplace=True)\n",
1176 | "df"
1177 | ]
1178 | },
1179 | {
1180 | "attachments": {},
1181 | "cell_type": "markdown",
1182 | "metadata": {},
1183 | "source": [
1184 |     "Now let's fill the missing slots with zeros:"
1185 | ]
1186 | },
1187 | {
1188 | "cell_type": "code",
1189 | "execution_count": null,
1190 | "metadata": {},
1191 | "outputs": [
1192 | {
1193 | "name": "stderr",
1194 | "output_type": "stream",
1195 | "text": [
1196 | "100%|██████████| 3/3 [00:00<00:00, 843.70it/s]\n"
1197 | ]
1198 | },
1199 | {
1200 | "data": {
1297 | "text/plain": [
1298 | " date city sales\n",
1299 | "0 2020-01-01 FOR 4944\n",
1300 | "1 2020-02-01 FOR 3435\n",
1301 | "2 2020-03-01 FOR 4543\n",
1302 | "3 2020-04-01 FOR 3879\n",
1303 | "4 2020-05-01 FOR 2601\n",
1304 | ".. ... ... ...\n",
1305 | "103 2022-08-01 RJ 0\n",
1306 | "104 2022-09-01 RJ 1856\n",
1307 | "105 2022-10-01 RJ 4804\n",
1308 | "106 2022-11-01 RJ 1764\n",
1309 | "107 2022-12-01 RJ 4425\n",
1310 | "\n",
1311 | "[108 rows x 3 columns]"
1312 | ]
1313 | },
1314 | "execution_count": null,
1315 | "metadata": {},
1316 | "output_type": "execute_result"
1317 | }
1318 | ],
1319 | "source": [
1320 | "df_full = add_missing_slots(df, datetime_col='date', entity_col='city', value_col='sales', freq='MS')\n",
1321 | "df_full"
1322 | ]
1323 | },
1324 | {
1325 | "attachments": {},
1326 | "cell_type": "markdown",
1327 | "metadata": {},
1328 | "source": [
1329 |     "Let's build a dataset for training a machine learning model to predict the sales for the next month, for each city, based on the sales from the previous 3 months."
1330 | ]
1331 | },
1332 | {
1333 | "cell_type": "code",
1334 | "execution_count": null,
1335 | "metadata": {},
1336 | "outputs": [
1337 | {
1338 | "name": "stderr",
1339 | "output_type": "stream",
1340 | "text": [
1341 | "100%|██████████| 3/3 [00:00<00:00, 205.58it/s]\n"
1342 | ]
1343 | }
1344 | ],
1345 | "source": [
1346 | "features, targets = transform_ts_data_into_features_and_target(\n",
1347 | " df_full,\n",
1348 | " n_features=3,\n",
1349 | " datetime_col='date',\n",
1350 | " entity_col='city',\n",
1351 | " value_col='sales',\n",
1352 | " n_targets=1,\n",
1353 | " step_size=1,\n",
1354 | " step_name='month'\n",
1355 | ")"
1356 | ]
1357 | },
1358 | {
1359 | "cell_type": "code",
1360 | "execution_count": null,
1361 | "metadata": {},
1362 | "outputs": [
1363 | {
1364 | "data": {
1497 | "text/plain": [
1498 | " sales_previous_3_month sales_previous_2_month sales_previous_1_month \\\n",
1499 | "0 4944.0 3435.0 4543.0 \n",
1500 | "1 3435.0 4543.0 3879.0 \n",
1501 | "2 4543.0 3879.0 2601.0 \n",
1502 | "3 3879.0 2601.0 2922.0 \n",
1503 | "4 2601.0 2922.0 4542.0 \n",
1504 | ".. ... ... ... \n",
1505 | "91 4197.0 4141.0 2899.0 \n",
1506 | "92 4141.0 2899.0 0.0 \n",
1507 | "93 2899.0 0.0 0.0 \n",
1508 | "94 0.0 0.0 1856.0 \n",
1509 | "95 0.0 1856.0 4804.0 \n",
1510 | "\n",
1511 | " date city target_sales_next_month \n",
1512 | "0 2020-04-01 FOR 3879.0 \n",
1513 | "1 2020-05-01 FOR 2601.0 \n",
1514 | "2 2020-06-01 FOR 2922.0 \n",
1515 | "3 2020-07-01 FOR 4542.0 \n",
1516 | "4 2020-08-01 FOR 1338.0 \n",
1517 | ".. ... ... ... \n",
1518 | "91 2022-07-01 RJ 0.0 \n",
1519 | "92 2022-08-01 RJ 0.0 \n",
1520 | "93 2022-09-01 RJ 1856.0 \n",
1521 | "94 2022-10-01 RJ 4804.0 \n",
1522 | "95 2022-11-01 RJ 1764.0 \n",
1523 | "\n",
1524 | "[96 rows x 6 columns]"
1525 | ]
1526 | },
1527 | "execution_count": null,
1528 | "metadata": {},
1529 | "output_type": "execute_result"
1530 | }
1531 | ],
1532 | "source": [
1533 | "pd.concat([features, targets], axis=1)"
1534 | ]
1535 | },
1536 | {
1537 | "attachments": {},
1538 | "cell_type": "markdown",
1539 | "metadata": {},
1540 | "source": [
1541 |     "# Embedding in Sklearn Pipelines"
1542 | ]
1543 | },
1544 | {
1545 | "cell_type": "code",
1546 | "execution_count": null,
1547 | "metadata": {},
1548 | "outputs": [],
1549 | "source": [
1550 | "from sklearn.pipeline import make_pipeline\n",
1551 | "from sklearn.preprocessing import FunctionTransformer"
1552 | ]
1553 | },
1554 | {
1555 | "cell_type": "code",
1556 | "execution_count": null,
1557 | "metadata": {},
1558 | "outputs": [],
1559 | "source": [
1560 | "add_missing_slots_transformer = FunctionTransformer(\n",
1561 | " add_missing_slots, \n",
1562 | " kw_args={\n",
1563 | " 'datetime_col': 'date', \n",
1564 | " 'entity_col': 'city', \n",
1565 | " 'value_col': 'sales', \n",
1566 | " 'freq': 'MS'\n",
1567 | " }\n",
1568 | ")\n",
1569 | "\n",
1570 | "transform_ts_data_into_features_and_target_transformer = FunctionTransformer(\n",
1571 | " transform_ts_data_into_features_and_target, \n",
1572 | " kw_args={\n",
1573 | " 'n_features': 3, \n",
1574 | " 'datetime_col': 'date', \n",
1575 | " 'entity_col': 'city', \n",
1576 | " 'value_col': 'sales', \n",
1577 | " 'n_targets': 1, \n",
1578 | " 'step_size': 1, \n",
1579 | " 'step_name': 'month',\n",
1580 | " 'concat_Xy': True\n",
1581 | " }\n",
1582 | ")"
1583 | ]
1584 | },
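1585 |   {
1586 |    "attachments": {},
1587 |    "cell_type": "markdown",
1588 |    "metadata": {},
1589 |    "source": [
1590 |     "Note the `concat_Xy=True` argument on the second transformer: a sklearn `Pipeline` passes a single object from step to step, so here the features and the target are returned concatenated in one dataframe rather than as a `(features, targets)` tuple."
1591 |    ]
1592 |   },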
1585 | {
1586 | "cell_type": "code",
1587 | "execution_count": null,
1588 | "metadata": {},
1589 | "outputs": [
1590 | {
1591 | "data": {
1629 | "text/plain": [
1630 | "Pipeline(steps=[('functiontransformer-1',\n",
1631 |        "                 FunctionTransformer(func=<function add_missing_slots at 0x11f8f49d0>,\n",
1632 | " kw_args={'datetime_col': 'date',\n",
1633 | " 'entity_col': 'city',\n",
1634 | " 'freq': 'MS',\n",
1635 | " 'value_col': 'sales'})),\n",
1636 | " ('functiontransformer-2',\n",
1637 |        "                 FunctionTransformer(func=<function transform_ts_data_into_features_and_target at 0x11f925ca0>,\n",
1638 | " kw_args={'concat_Xy': True,\n",
1639 | " 'datetime_col': 'date',\n",
1640 | " 'entity_col': 'city',\n",
1641 | " 'n_features': 3, 'n_targets': 1,\n",
1642 | " 'step_name': 'month',\n",
1643 | " 'step_size': 1,\n",
1644 | " 'value_col': 'sales'}))])"
1645 | ]
1646 | },
1647 | "execution_count": null,
1648 | "metadata": {},
1649 | "output_type": "execute_result"
1650 | }
1651 | ],
1652 | "source": [
1653 | "ts_data_to_features_and_target_pipeline = make_pipeline(\n",
1654 | " add_missing_slots_transformer,\n",
1655 | " transform_ts_data_into_features_and_target_transformer\n",
1656 | ")\n",
1657 | "ts_data_to_features_and_target_pipeline"
1658 | ]
1659 | },
1660 | {
1661 | "cell_type": "code",
1662 | "execution_count": null,
1663 | "metadata": {},
1664 | "outputs": [
1665 | {
1666 | "name": "stderr",
1667 | "output_type": "stream",
1668 | "text": [
1669 | "100%|██████████| 3/3 [00:00<00:00, 715.47it/s]\n",
1670 | "100%|██████████| 3/3 [00:00<00:00, 184.12it/s]\n"
1671 | ]
1672 | },
1673 | {
1674 | "data": {
1807 | "text/plain": [
1808 | " sales_previous_3_month sales_previous_2_month sales_previous_1_month \\\n",
1809 | "0 4944.0 3435.0 4543.0 \n",
1810 | "1 3435.0 4543.0 3879.0 \n",
1811 | "2 4543.0 3879.0 2601.0 \n",
1812 | "3 3879.0 2601.0 2922.0 \n",
1813 | "4 2601.0 2922.0 4542.0 \n",
1814 | ".. ... ... ... \n",
1815 | "91 4197.0 4141.0 2899.0 \n",
1816 | "92 4141.0 2899.0 0.0 \n",
1817 | "93 2899.0 0.0 0.0 \n",
1818 | "94 0.0 0.0 1856.0 \n",
1819 | "95 0.0 1856.0 4804.0 \n",
1820 | "\n",
1821 | " date city target_sales_next_month \n",
1822 | "0 2020-04-01 FOR 3879.0 \n",
1823 | "1 2020-05-01 FOR 2601.0 \n",
1824 | "2 2020-06-01 FOR 2922.0 \n",
1825 | "3 2020-07-01 FOR 4542.0 \n",
1826 | "4 2020-08-01 FOR 1338.0 \n",
1827 | ".. ... ... ... \n",
1828 | "91 2022-07-01 RJ 0.0 \n",
1829 | "92 2022-08-01 RJ 0.0 \n",
1830 | "93 2022-09-01 RJ 1856.0 \n",
1831 | "94 2022-10-01 RJ 4804.0 \n",
1832 | "95 2022-11-01 RJ 1764.0 \n",
1833 | "\n",
1834 | "[96 rows x 6 columns]"
1835 | ]
1836 | },
1837 | "execution_count": null,
1838 | "metadata": {},
1839 | "output_type": "execute_result"
1840 | }
1841 | ],
1842 | "source": [
1843 | "Xy_df = ts_data_to_features_and_target_pipeline.fit_transform(df)\n",
1844 | "Xy_df"
1845 | ]
1846 | },
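1847 |   {
1848 |    "attachments": {},
1849 |    "cell_type": "markdown",
1850 |    "metadata": {},
1851 |    "source": [
1852 |     "From here, a minimal sketch of training on the pipeline output (again assuming scikit-learn): `Xy_df` is split back into numeric features and target before fitting."
1853 |    ]
1854 |   },
1855 |   {
1856 |    "cell_type": "code",
1857 |    "execution_count": null,
1858 |    "metadata": {},
1859 |    "outputs": [],
1860 |    "source": [
1861 |     "from sklearn.linear_model import LinearRegression\n",
1862 |     "\n",
1863 |     "# split the concatenated dataframe back into X and y\n",
1864 |     "X = Xy_df.drop(columns=['date', 'city', 'target_sales_next_month'])\n",
1865 |     "y = Xy_df['target_sales_next_month']\n",
1866 |     "\n",
1867 |     "model = LinearRegression().fit(X, y)"
1868 |    ]
1869 |   },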
1847 | {
1848 | "cell_type": "code",
1849 | "execution_count": null,
1850 | "metadata": {},
1851 | "outputs": [],
1852 | "source": []
1853 | }
1854 | ],
1855 | "metadata": {
1856 | "kernelspec": {
1857 | "display_name": "python3",
1858 | "language": "python",
1859 | "name": "python3"
1860 | }
1861 | },
1862 | "nbformat": 4,
1863 | "nbformat_minor": 4
1864 | }
1865 |
--------------------------------------------------------------------------------
/nbs/nbdev.yml:
--------------------------------------------------------------------------------
1 | project:
2 | output-dir: _docs
3 |
4 | website:
5 | title: "ts2ml"
6 | site-url: "https://joaopcnogueira.github.io/ts2ml"
7 |   description: "Tools to Transform a Time Series into Features and Target, a.k.a. Supervised Learning"
8 | repo-branch: main
9 | repo-url: "https://github.com/joaopcnogueira/ts2ml"
10 |
--------------------------------------------------------------------------------
/nbs/sidebar.yml:
--------------------------------------------------------------------------------
1 | website:
2 | sidebar:
3 | contents:
4 | - index.ipynb
5 | - 00_core.ipynb
6 |
--------------------------------------------------------------------------------
/nbs/styles.css:
--------------------------------------------------------------------------------
1 | .cell {
2 | margin-bottom: 1rem;
3 | }
4 |
5 | .cell > .sourceCode {
6 | margin-bottom: 0;
7 | }
8 |
9 | .cell-output > pre {
10 | margin-bottom: 0;
11 | }
12 |
13 | .cell-output > pre, .cell-output > .sourceCode > pre, .cell-output-stdout > pre {
14 | margin-left: 0.8rem;
15 | margin-top: 0;
16 | background: none;
17 | border-left: 2px solid lightsalmon;
18 | border-top-left-radius: 0;
19 | border-top-right-radius: 0;
20 | }
21 |
22 | .cell-output > .sourceCode {
23 | border: none;
24 | }
25 |
26 | .cell-output > .sourceCode {
27 | background: none;
28 | margin-top: 0;
29 | }
30 |
31 | div.description {
32 | padding-left: 2px;
33 | padding-top: 5px;
34 | font-style: italic;
35 | font-size: 135%;
36 | opacity: 70%;
37 | }
38 |
--------------------------------------------------------------------------------
/settings.ini:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | repo = ts2ml
3 | lib_name = ts2ml
4 | version = 1.0.1
5 | min_python = 3.7
6 | license = apache2
7 | black_formatting = False
8 | doc_path = _docs
9 | lib_path = ts2ml
10 | nbs_path = nbs
11 | recursive = True
12 | tst_flags = notest
13 | put_version_in_init = True
14 | branch = main
15 | custom_sidebar = False
16 | doc_host = https://joaopcnogueira.github.io
17 | doc_baseurl = /ts2ml
18 | git_url = https://github.com/joaopcnogueira/ts2ml
19 | title = ts2ml
20 | audience = Developers
21 | author = João Nogueira
22 | author_email = joaopcnogueira@gmail.com
23 | copyright = 2023 onwards, João Nogueira
24 | description = Tools to Transform a Time Series into Features and Target, a.k.a. Supervised Learning
25 | keywords = nbdev jupyter notebook python
26 | language = English
27 | status = 3
28 | user = joaopcnogueira
29 | requirements = fastcore pandas numpy tqdm scikit-learn
30 | dev_requirements = jupyter
31 | readme_nb = index.ipynb
32 | allowed_metadata_keys =
33 | allowed_cell_metadata_keys =
34 | jupyter_hooks = True
35 | clean_ids = True
36 | clear_all = False
37 |
38 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from pkg_resources import parse_version
2 | from configparser import ConfigParser
3 | import setuptools, shlex
4 | assert parse_version(setuptools.__version__)>=parse_version('36.2')
5 |
6 | # note: all settings are in settings.ini; edit there, not here
7 | config = ConfigParser(delimiters=['='])
8 | config.read('settings.ini', encoding='utf-8')
9 | cfg = config['DEFAULT']
10 |
11 | cfg_keys = 'version description keywords author author_email'.split()
12 | expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
13 | for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
14 | setup_cfg = {o:cfg[o] for o in cfg_keys}
15 |
16 | licenses = {
17 | 'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
18 | 'mit': ('MIT License', 'OSI Approved :: MIT License'),
19 | 'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'),
20 | 'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'),
21 | 'bsd3': ('BSD License', 'OSI Approved :: BSD License'),
22 | }
23 | statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
24 | '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
25 | py_versions = '3.6 3.7 3.8 3.9 3.10'.split()
26 |
27 | requirements = shlex.split(cfg.get('requirements', ''))
28 | if cfg.get('pip_requirements'): requirements += shlex.split(cfg.get('pip_requirements', ''))
29 | min_python = cfg['min_python']
30 | lic = licenses.get(cfg['license'].lower(), (cfg['license'], None))
31 | dev_requirements = (cfg.get('dev_requirements') or '').split()
32 |
33 | setuptools.setup(
34 | name = cfg['lib_name'],
35 | license = lic[0],
36 | classifiers = [
37 | 'Development Status :: ' + statuses[int(cfg['status'])],
38 | 'Intended Audience :: ' + cfg['audience'].title(),
39 | 'Natural Language :: ' + cfg['language'].title(),
40 | ] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []),
41 | url = cfg['git_url'],
42 | packages = setuptools.find_packages(),
43 | include_package_data = True,
44 | install_requires = requirements,
45 | extras_require={ 'dev': dev_requirements },
46 | dependency_links = cfg.get('dep_links','').split(),
47 | python_requires = '>=' + cfg['min_python'],
48 | long_description = open('README.md', encoding='utf-8').read(),
49 | long_description_content_type = 'text/markdown',
50 | zip_safe = False,
51 | entry_points = {
52 | 'console_scripts': cfg.get('console_scripts','').split(),
53 | 'nbdev': [f'{cfg.get("lib_path")}={cfg.get("lib_path")}._modidx:d']
54 | },
55 | **setup_cfg)
56 |
57 |
58 |
--------------------------------------------------------------------------------
/ts2ml/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "1.0.1"
2 |
--------------------------------------------------------------------------------
/ts2ml/_modidx.py:
--------------------------------------------------------------------------------
1 | # Autogenerated by nbdev
2 |
3 | d = { 'settings': { 'branch': 'main',
4 | 'doc_baseurl': '/ts2ml',
5 | 'doc_host': 'https://joaopcnogueira.github.io',
6 | 'git_url': 'https://github.com/joaopcnogueira/ts2ml',
7 | 'lib_path': 'ts2ml'},
8 | 'syms': { 'ts2ml.core': { 'ts2ml.core.add_missing_slots': ('core.html#add_missing_slots', 'ts2ml/core.py'),
9 | 'ts2ml.core.get_cutoff_indices_features_and_target': ( 'core.html#get_cutoff_indices_features_and_target',
10 | 'ts2ml/core.py'),
11 | 'ts2ml.core.transform_ts_data_into_features_and_target': ( 'core.html#transform_ts_data_into_features_and_target',
12 | 'ts2ml/core.py')}}}
13 |
--------------------------------------------------------------------------------
/ts2ml/core.py:
--------------------------------------------------------------------------------
1 | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_core.ipynb.
2 |
3 | # %% auto 0
4 | __all__ = ['add_missing_slots', 'get_cutoff_indices_features_and_target', 'transform_ts_data_into_features_and_target']
5 |
6 | # %% ../nbs/00_core.ipynb 3
7 | import numpy as np
8 | import pandas as pd
9 | from tqdm import tqdm
10 | from typing import List
11 |
12 | # %% ../nbs/00_core.ipynb 4
13 | def add_missing_slots(
14 | df: pd.DataFrame, # input dataframe with datetime, entity and value columns - time series format
15 | datetime_col: str, # name of the datetime column
16 | entity_col: str, # name of the entity column. If a time series is associated to a location, this column will be 'location_id'
17 | value_col: str, # name of the value column
18 | freq: str='H', # frequency of the time series. Default is hourly
19 | fill_value: int = 0 # value to use to fill missing slots
20 | ) -> pd.DataFrame:
21 | """
22 | Add missing slots to a time series dataframe.
23 | This function is useful to fill missing slots in a time series dataframe.
24 | For example, if a time series is associated to a location, this function will add missing slots for each location.
25 | Missing slots are filled with the value specified in the 'fill_value' parameter.
26 | By default, the frequency of the time series is hourly.
27 | """
28 |
29 | entity_ids = df[entity_col].unique()
30 | all_hours = pd.date_range(start=df[datetime_col].min(), end=df[datetime_col].max(), freq=freq)
31 |
32 | output = pd.DataFrame()
33 |
34 | for entity_id in tqdm(entity_ids):
35 |
36 |         # keep only the rows for this entity
37 | df_entity_id = df.loc[df[entity_col] == entity_id, [datetime_col, value_col]]
38 |
39 | # quick way to add missing dates with 0 in a Series
40 | # taken from https://stackoverflow.com/a/19324591
41 | df_entity_id.set_index(datetime_col, inplace=True)
42 | df_entity_id.index = pd.DatetimeIndex(df_entity_id.index)
43 |         df_entity_id = df_entity_id.reindex(all_hours, fill_value=fill_value)
44 |
45 | # add back 'location_id' column
46 | df_entity_id[entity_col] = entity_id
47 |
48 | output = pd.concat([output, df_entity_id])
49 |
50 |     # move the datetime values from the index back to a column
51 | output = output.reset_index().rename(columns={'index': datetime_col})
52 | output = output[[datetime_col, entity_col, value_col]].copy()
53 |
54 | return output
55 |
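56 | # Example (a minimal sketch; the column names 't', 'id' and 'v' are hypothetical):
57 | #   df = pd.DataFrame({'t': ['2022-01-01 00:00', '2022-01-01 02:00'], 'id': [1, 1], 'v': [5, 7]})
58 | #   add_missing_slots(df, datetime_col='t', entity_col='id', value_col='v', freq='H')
59 | #   -> returns three rows, with the missing 01:00 slot filled with 0
60 | 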
56 | # %% ../nbs/00_core.ipynb 7
57 | def get_cutoff_indices_features_and_target(
58 | ts_data: pd.DataFrame, # Time Series DataFrame
59 | datetime_col: str, # Name of the datetime column
60 | n_features: int, # Number of features to use for the prediction
61 | n_targets: int=1, # Number of target values to predict
62 | step_size: int=1, # Step size to use to slide the Time Series DataFrame
63 | ) -> List[tuple]:
64 | """Function to get the indices for the cutoffs of a Time Series DataFrame.
65 |     The Time Series DataFrame should be ordered by time."""
66 |
67 | # check if it is ordered
68 | if not ts_data[datetime_col].is_monotonic_increasing:
69 | raise ValueError("The DataFrame should be ordered by time.")
70 |
71 | # check if the index is ordered
72 | if not ts_data.index.is_monotonic_increasing:
73 | raise ValueError("The index should be ordered by time.")
74 |
75 | # check if the step_size is valid
76 | if step_size < 1:
77 | raise ValueError("The step_size should be >= 1.")
78 |
79 | # check if the number of features is valid
80 | if n_features < 1:
81 | raise ValueError("The number of features should be >= 1.")
82 |
83 | # check if the number of target values is valid
84 | if n_targets < 1:
85 | raise ValueError("The number of target values should be >= 1.")
86 |
87 | # check if the number of features and target values is valid
88 | if n_features + n_targets > len(ts_data):
89 | raise ValueError("The number of features + target values should be <= the length of the time series.")
90 |
91 | # below we compute the indices for the cutoffs
92 | stop_position = len(ts_data) - 1
93 |
94 | # Start the first sub-sequence at index position 0
95 | subseq_first_idx = 0
96 | subseq_mid_idx = n_features
97 | subseq_last_idx = n_features + n_targets
98 | indices = []
99 |
100 | while subseq_last_idx <= stop_position:
101 | indices.append((subseq_first_idx, subseq_mid_idx, subseq_last_idx))
102 |
103 | subseq_first_idx += step_size
104 | subseq_mid_idx += step_size
105 | subseq_last_idx += step_size
106 |
107 | return indices
108 |
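109 | # Worked example (a sketch): for a 6-row series with n_features=3, n_targets=1 and
110 | # step_size=1, the returned cutoffs are [(0, 3, 4), (1, 4, 5)]; rows [first:mid) form
111 | # the features and rows [mid:last) the target of each training example.
112 | 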
109 | # %% ../nbs/00_core.ipynb 11
110 | def transform_ts_data_into_features_and_target(
111 | ts_data: pd.DataFrame, # Time Series DataFrame
112 | n_features: int, # Number of features to use for the prediction
113 | datetime_col: str, # Name of the datetime column
114 | entity_col: str, # Name of the entity column, e.g. location_id
115 | value_col: str, # Name of the value column
116 | n_targets: int=1, # Number of target values to predict
117 | step_size: int=1, # Step size to use to slide the Time Series DataFrame
118 | step_name: str=None, # Name of the step column
119 | concat_Xy: bool=False # Whether to concat X and y on the same dataframe or not
120 | ): # (features, targets) tuple of DataFrames, or a single concatenated DataFrame when concat_Xy=True
121 | """
122 | Slices and transposes data from time-series format into the (features, target)
123 | format used to train supervised ML models. Returns a (features, targets) tuple of DataFrames, or a single concatenated DataFrame when 'concat_Xy' is True.
124 | """
125 |
126 | entity_ids = ts_data[entity_col].unique()
127 | features = pd.DataFrame()
128 | targets = pd.DataFrame()
129 |
130 | for entity_id in tqdm(entity_ids):
131 |
132 | # keep only the ts data for this entity
133 | ts_data_one_location = ts_data.loc[
134 | ts_data[entity_col] == entity_id,
135 | [datetime_col, value_col]
136 | ]
137 |
138 | # pre-compute cutoff indices to split dataframe rows
139 | indices = get_cutoff_indices_features_and_target(
140 | ts_data=ts_data_one_location,
141 | datetime_col=datetime_col,
142 | n_features=n_features,
143 | n_targets=n_targets,
144 | step_size=step_size
145 | )
146 |
147 | # slice and transpose data into numpy arrays for features and targets
148 | time_values = []
149 | n_examples = len(indices)
150 | x = np.empty(shape=(n_examples, n_features), dtype=np.float32)
151 | if n_targets == 1:
152 | y = np.empty(shape=(n_examples,), dtype=np.float32)
153 | for i, idx in enumerate(indices):
154 | x[i, :] = ts_data_one_location.iloc[idx[0]:idx[1]][value_col].values
155 | y[i] = ts_data_one_location.iloc[idx[1]:idx[2]][value_col].values[0]
156 | time_values.append(ts_data_one_location.iloc[idx[1]][datetime_col])
157 | else:
158 | y = np.empty(shape=(n_examples, n_targets), dtype=np.float32)
159 | for i, idx in enumerate(indices):
160 | x[i, :] = ts_data_one_location.iloc[idx[0]:idx[1]][value_col].values
161 | y[i, :] = ts_data_one_location.iloc[idx[1]:idx[2]][value_col].values
162 | time_values.append(ts_data_one_location.iloc[idx[1]][datetime_col])
163 |
164 | # numpy -> pandas
165 | if step_name is None:
166 | features_one_location = pd.DataFrame(
167 | x,
168 | columns=[f'{value_col}_previous_{i+1}' for i in reversed(range(n_features))]
169 | )
170 | features_one_location[datetime_col] = time_values
171 | features_one_location[entity_col] = entity_id
172 |
173 | # numpy -> pandas
174 | if n_targets == 1:
175 | targets_one_location = pd.DataFrame(y, columns=[f'target_{value_col}_next'])
176 | else:
177 | targets_one_location = pd.DataFrame(
178 | y,
179 | columns=[f'target_{value_col}_next_{i+1}' for i in range(n_targets)]
180 | )
181 | else:
182 | features_one_location = pd.DataFrame(
183 | x,
184 | columns=[f'{value_col}_previous_{i+1}_{step_name}' for i in reversed(range(n_features))]
185 | )
186 | features_one_location[datetime_col] = time_values
187 | features_one_location[entity_col] = entity_id
188 |
189 | # numpy -> pandas
190 | if n_targets == 1:
191 | targets_one_location = pd.DataFrame(y, columns=[f'target_{value_col}_next_{step_name}'])
192 | else:
193 | targets_one_location = pd.DataFrame(
194 | y,
195 | columns=[f'target_{value_col}_next_{i+1}_{step_name}' for i in range(n_targets)]
196 | )
197 |
198 | # concatenate results
199 | features = pd.concat([features, features_one_location])
200 | targets = pd.concat([targets, targets_one_location])
201 |
202 | features.reset_index(inplace=True, drop=True)
203 | targets.reset_index(inplace=True, drop=True)
204 |
205 | if concat_Xy:
206 | return pd.concat([features, targets], axis=1)
207 | else:
208 | return features, targets
209 |
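# A hedged usage sketch with hypothetical column names, continuing the small
# `ts` frame from the example above (one entity, 5 hourly rows):
#
#   >>> ts['location_id'] = 1
#   >>> X, y = transform_ts_data_into_features_and_target(
#   ...     ts, n_features=3, datetime_col='hour',
#   ...     entity_col='location_id', value_col='rides')
#   >>> list(X.columns)
#   ['rides_previous_3', 'rides_previous_2', 'rides_previous_1', 'hour', 'location_id']
#   >>> list(y.columns)
#   ['target_rides_next']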
--------------------------------------------------------------------------------