├── .gitattributes
├── 9781484256169.jpg
├── Chapter 1
└── ch01_correlating_inline_views.sql
├── Chapter 10
└── ch10_creating_delimited_text.sql
├── Chapter 11
└── ch11_analytic_partitions_ordering_and_windows.sql
├── Chapter 12
└── ch12_answering_top_n_questions.sql
├── Chapter 13
└── ch13_ordered_subsets_with_rolling_sums.sql
├── Chapter 14
└── ch14_analyzing_activity_logs_with_lead.sql
├── Chapter 15
└── ch15_forecasting_with_linear_regression.sql
├── Chapter 16
└── ch16_rolling_sums_to_forecast_reaching_minimums.sql
├── Chapter 17
└── ch17_up_and_down_patterns.sql
├── Chapter 18
└── ch18_grouping_data_with_patterns.sql
├── Chapter 19
└── ch19_merging_date_ranges.sql
├── Chapter 2
└── ch02_pitfalls_of_set_operations.sql
├── Chapter 20
└── ch20_finding_abnormal_peaks.sql
├── Chapter 21
└── ch21_bin_fitting.sql
├── Chapter 22
└── ch22_counting_children_in_trees.sql
├── Chapter 3
└── ch03_divide_and_conquer_with_subquery_factoring.sql
├── Chapter 4
└── ch04_tree_calculations_with_recursion.sql
├── Chapter 5
└── ch05_functions_defined_within_sql.sql
├── Chapter 6
└── ch06_iterative_calculations_with_multidimensional_data.sql
├── Chapter 7
└── ch07_unpivoting_columns_to_rows.sql
├── Chapter 8
└── ch08_pivoting_rows_to_columns.sql
├── Chapter 9
└── ch09_splitting_delimited_text.sql
├── Contributing.md
├── LICENSE.txt
├── README.md
├── practical_clean_schema.sql
├── practical_create_schema.sql
├── practical_drop_schema.sql
├── practical_fill_schema.sql
└── practical_readme.txt
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/9781484256169.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Apress/practical-oracle-sql/5bbc0d14e9d0c165156e194bb171c2a2d5c5729f/9781484256169.jpg
--------------------------------------------------------------------------------
/Chapter 1/ch01_correlating_inline_views.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch01_correlating_inline_views.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 1
10 | Correlating Inline Views
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | /* -----------------------------------------------------
24 | Chapter 1 example code
25 | ----------------------------------------------------- */
26 |
27 | -- Listing 1-1. The yearly sales of the 3 beers from Balthazar Brauerei
28 |
29 | select
30 | bp.brewery_name
31 | , bp.product_id as p_id
32 | , bp.product_name
33 | , ys.yr
34 | , ys.yr_qty
35 | from brewery_products bp
36 | join yearly_sales ys
37 | on ys.product_id = bp.product_id
38 | where bp.brewery_id = 518
39 | order by bp.product_id, ys.yr;
40 |
41 | -- Listing 1-2. Retrieving two columns from the best-selling year per beer
42 |
43 | select
44 | bp.brewery_name
45 | , bp.product_id as p_id
46 | , bp.product_name
47 | , (
48 | select ys.yr
49 | from yearly_sales ys
50 | where ys.product_id = bp.product_id
51 | order by ys.yr_qty desc
52 | fetch first row only
53 | ) as yr
54 | , (
55 | select ys.yr_qty
56 | from yearly_sales ys
57 | where ys.product_id = bp.product_id
58 | order by ys.yr_qty desc
59 | fetch first row only
60 | ) as yr_qty
61 | from brewery_products bp
62 | where bp.brewery_id = 518
63 | order by bp.product_id;
64 |
65 | -- Listing 1-3. Using just a single scalar subquery and value concatenation
66 |
67 | select
68 | brewery_name
69 | , product_id as p_id
70 | , product_name
71 | , to_number(
72 | substr(yr_qty_str, 1, instr(yr_qty_str, ';') - 1)
73 | ) as yr
74 | , to_number(
75 | substr(yr_qty_str, instr(yr_qty_str, ';') + 1)
76 | ) as yr_qty
77 | from (
78 | select
79 | bp.brewery_name
80 | , bp.product_id
81 | , bp.product_name
82 | , (
83 | select ys.yr || ';' || ys.yr_qty
84 | from yearly_sales ys
85 | where ys.product_id = bp.product_id
86 | order by ys.yr_qty desc
87 | fetch first row only
88 | ) as yr_qty_str
89 | from brewery_products bp
90 | where bp.brewery_id = 518
91 | )
92 | order by product_id;
93 |
94 | -- Listing 1-4. Using analytic function to be able to retrieve all columns if desired
95 |
96 | select
97 | brewery_name
98 | , product_id as p_id
99 | , product_name
100 | , yr
101 | , yr_qty
102 | from (
103 | select
104 | bp.brewery_name
105 | , bp.product_id
106 | , bp.product_name
107 | , ys.yr
108 | , ys.yr_qty
109 | , row_number() over (
110 | partition by bp.product_id
111 | order by ys.yr_qty desc
112 | ) as rn
113 | from brewery_products bp
114 | join yearly_sales ys
115 | on ys.product_id = bp.product_id
116 | where bp.brewery_id = 518
117 | )
118 | where rn = 1
119 | order by product_id;
120 |
121 | -- Listing 1-5. Achieving the same with a lateral inline view
122 |
123 | select
124 | bp.brewery_name
125 | , bp.product_id as p_id
126 | , bp.product_name
127 | , top_ys.yr
128 | , top_ys.yr_qty
129 | from brewery_products bp
130 | cross join lateral(
131 | select
132 | ys.yr
133 | , ys.yr_qty
134 | from yearly_sales ys
135 | where ys.product_id = bp.product_id
136 | order by ys.yr_qty desc
137 | fetch first row only
138 | ) top_ys
139 | where bp.brewery_id = 518
140 | order by bp.product_id;
141 |
142 | -- Traditional style from clause without ANSI style cross join
143 |
144 | select
145 | bp.brewery_name
146 | , bp.product_id as p_id
147 | , bp.product_name
148 | , top_ys.yr
149 | , top_ys.yr_qty
150 | from brewery_products bp
151 | , lateral(
152 | select
153 | ys.yr
154 | , ys.yr_qty
155 | from yearly_sales ys
156 | where ys.product_id = bp.product_id
157 | order by ys.yr_qty desc
158 | fetch first row only
159 | ) top_ys
160 | where bp.brewery_id = 518
161 | order by bp.product_id;
162 |
163 | -- Combining both lateral and join predicates in the on clause
164 |
165 | select
166 | bp.brewery_name
167 | , bp.product_id as p_id
168 | , bp.product_name
169 | , top_ys.yr
170 | , top_ys.yr_qty
171 | from brewery_products bp
172 | join lateral(
173 | select
174 | ys.yr
175 | , ys.yr_qty
176 | from yearly_sales ys
177 | where ys.product_id = bp.product_id
178 | order by ys.yr_qty desc
179 | fetch first row only
180 | ) top_ys
181 | on 1=1
182 | where bp.brewery_id = 518
183 | order by bp.product_id;
184 |
185 | -- Listing 1-6. The alternative syntax cross apply
186 |
187 | select
188 | bp.brewery_name
189 | , bp.product_id as p_id
190 | , bp.product_name
191 | , top_ys.yr
192 | , top_ys.yr_qty
193 | from brewery_products bp
194 | cross apply(
195 | select
196 | ys.yr
197 | , ys.yr_qty
198 | from yearly_sales ys
199 | where ys.product_id = bp.product_id
200 | order by ys.yr_qty desc
201 | fetch first row only
202 | ) top_ys
203 | where bp.brewery_id = 518
204 | order by bp.product_id;
205 |
206 | -- Listing 1-7. Using outer apply when you need outer join functionality
207 |
208 | select
209 | bp.brewery_name
210 | , bp.product_id as p_id
211 | , bp.product_name
212 | , top_ys.yr
213 | , top_ys.yr_qty
214 | from brewery_products bp
215 | outer apply(
216 | select
217 | ys.yr
218 | , ys.yr_qty
219 | from yearly_sales ys
220 | where ys.product_id = bp.product_id
221 | and ys.yr_qty < 400
222 | order by ys.yr_qty desc
223 | fetch first row only
224 | ) top_ys
225 | where bp.brewery_id = 518
226 | order by bp.product_id;
227 |
228 | -- Listing 1-8. Outer join with the lateral keyword
229 |
230 | select
231 | bp.brewery_name
232 | , bp.product_id as p_id
233 | , bp.product_name
234 | , top_ys.yr
235 | , top_ys.yr_qty
236 | from brewery_products bp
237 | left outer join lateral(
238 | select
239 | ys.yr
240 | , ys.yr_qty
241 | from yearly_sales ys
242 | where ys.product_id = bp.product_id
243 | order by ys.yr_qty desc
244 | fetch first row only
245 | ) top_ys
246 | on top_ys.yr_qty < 500
247 | where bp.brewery_id = 518
248 | order by bp.product_id;
249 |
250 | /* ***************************************************** */
251 |
--------------------------------------------------------------------------------
/Chapter 10/ch10_creating_delimited_text.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch10_creating_delimited_text.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 10
10 | Creating Delimited Text
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 | set long 8000
23 |
24 | /* -----------------------------------------------------
25 | Chapter 10 example code
26 | ----------------------------------------------------- */
27 |
28 | -- Listing 10-2. The breweries and products
29 |
30 | select *
31 | from brewery_products
32 | order by brewery_id, product_id;
33 |
34 | -- Listing 10-3. Using listagg to create product list
35 |
36 | select
37 | max(brewery_name) as brewery_name
38 | , listagg(product_name, ',') within group (
39 | order by product_id
40 | ) as product_list
41 | from brewery_products
42 | group by brewery_id
43 | order by brewery_id;
44 |
45 | -- Changing order of the list
46 |
47 | select
48 | max(brewery_name) as brewery_name
49 | , listagg(product_name, ',') within group (
50 | order by product_name
51 | ) as product_list
52 | from brewery_products
53 | group by brewery_id
54 | order by brewery_id;
55 |
56 | -- Listing 10-5. Using collect and the created function
57 |
58 | select
59 | max(brewery_name) as brewery_name
60 | , name_coll_type_to_varchar2(
61 | cast(
62 | collect(
63 | product_name
64 | order by product_id
65 | )
66 | as name_coll_type
67 | )
68 | , ','
69 | ) as product_list
70 | from brewery_products
71 | group by brewery_id
72 | order by brewery_id;
73 |
74 | -- Alternative using generic apex_t_varchar2 and apex_string.join
75 |
76 | select
77 | max(brewery_name) as brewery_name
78 | , apex_string.join(
79 | cast(
80 | collect(
81 | product_name
82 | order by product_id
83 | )
84 | as apex_t_varchar2
85 | )
86 | , ','
87 | ) as product_list
88 | from brewery_products
89 | group by brewery_id
90 | order by brewery_id;
91 |
92 | -- Listing 10-7. Using stragg custom aggregate function
93 |
94 | select
95 | max(brewery_name) as brewery_name
96 | , stragg(
97 | stragg_expr_type(product_name, ',')
98 | ) as product_list
99 | from brewery_products
100 | group by brewery_id
101 | order by brewery_id;
102 |
103 | -- Using an unnecessary distinct can get lucky and produce an ordered result (not guaranteed)
104 |
105 | select
106 | max(brewery_name) as brewery_name
107 | , stragg(
108 | distinct stragg_expr_type(product_name, ',')
109 | ) as product_list
110 | from brewery_products
111 | group by brewery_id
112 | order by brewery_id;
113 |
114 | -- Listing 10-8. Using xmlagg and extract text from xml
115 |
116 | select
117 | max(brewery_name) as brewery_name
118 | , rtrim(
119 | xmlagg(
120 | xmlelement(z, product_name, ',')
121 | order by product_id
122 | ).extract('//text()').getstringval()
123 | , ','
124 | ) as product_list
125 | from brewery_products
126 | group by brewery_id
127 | order by brewery_id;
128 |
129 | -- Skip extraction by claiming content is xml
130 |
131 | select
132 | max(brewery_name) as brewery_name
133 | , rtrim(
134 | xmlagg(
135 | xmlparse(content product_name || ',' wellformed)
136 | order by product_id
137 | ).getstringval()
138 | , ','
139 | ) as product_list
140 | from brewery_products
141 | group by brewery_id
142 | order by brewery_id;
143 |
144 | -- Listing 10-9. Getting ORA-01489 with listagg
145 |
146 | select
147 | listagg(rpad(p.name, 20)) within group (
148 | order by p.id
149 | ) as product_list
150 | from products p
151 | join monthly_sales ms
152 | on ms.product_id = p.id;
153 |
154 | -- Listing 10-10. Suppressing error in listagg
155 |
156 | select
157 | listagg(
158 | rpad(p.name, 20)
159 | on overflow truncate '{more}' with count
160 | ) within group (
161 | order by p.id
162 | ) as product_list
163 | from products p
164 | join monthly_sales ms
165 | on ms.product_id = p.id;
166 |
167 | -- Listing 10-11. Reducing data with distinct
168 |
169 | select
170 | listagg(distinct rpad(p.name, 20)) within group (
171 | order by p.id
172 | ) as product_list
173 | from products p
174 | join monthly_sales ms
175 | on ms.product_id = p.id;
176 |
177 | -- Bonus: Un-ordered pre-19c version using stragg
178 |
179 | select
180 | stragg(
181 | distinct stragg_expr_type(rpad(p.name, 20), null)
182 | ) as product_list
183 | from products p
184 | join monthly_sales ms
185 | on ms.product_id = p.id;
186 |
187 | -- Listing 10-12. Using xmlagg to aggregate to a clob
188 |
189 | select
190 | xmlagg(
191 | xmlparse(
192 | content rpad(p.name, 20) wellformed
193 | )
194 | order by p.id
195 | ).getclobval() as product_list
196 | from products p
197 | join monthly_sales ms
198 | on ms.product_id = p.id;
199 |
200 | -- Listing 10-13. Using json_arrayagg to aggregate to a clob
201 |
202 | select
203 | json_value(
204 | replace(
205 | json_arrayagg(
206 | rpad(p.name, 20)
207 | order by p.id
208 | returning clob
209 | )
210 | , '","'
211 | , ''
212 | )
213 | , '$[0]' returning clob
214 | ) as product_list
215 | from products p
216 | join monthly_sales ms
217 | on ms.product_id = p.id;
218 |
219 | -- Listing 10-14. Using apex_string.join_clob to aggregate to a clob
220 |
221 | select
222 | apex_string.join_clob(
223 | cast(
224 | collect(
225 | rpad(p.name, 20)
226 | order by p.id
227 | )
228 | as apex_t_varchar2
229 | )
230 | , ''
231 | , 12 /* dbms_lob.call */
232 | ) as product_list
233 | from products p
234 | join monthly_sales ms
235 | on ms.product_id = p.id;
236 |
237 | /* ***************************************************** */
238 |
--------------------------------------------------------------------------------
/Chapter 11/ch11_analytic_partitions_ordering_and_windows.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch11_analytic_partitions_ordering_and_windows.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 11
10 | Analytic Partitions, Ordering and Windows
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | /* -----------------------------------------------------
24 | Chapter 11 example code
25 | ----------------------------------------------------- */
26 |
27 | -- Listing 11-1. Content of orderlines table for two beers
28 |
29 | select
30 | ol.product_id as p_id
31 | , p.name as product_name
32 | , ol.order_id as o_id
33 | , ol.qty
34 | from orderlines ol
35 | join products p
36 | on p.id = ol.product_id
37 | where ol.product_id in (4280, 6600)
38 | order by ol.product_id, ol.qty;
39 |
40 | -- Listing 11-2. The simplest analytic function call is a grand total
41 |
42 | select
43 | ol.product_id as p_id
44 | , p.name as product_name
45 | , ol.order_id as o_id
46 | , ol.qty
47 | , sum(ol.qty) over () as t_qty
48 | from orderlines ol
49 | join products p
50 | on p.id = ol.product_id
51 | where ol.product_id in (4280, 6600)
52 | order by ol.product_id, ol.qty;
53 |
54 | -- Listing 11-3. Creating subtotals by product with partitioning
55 |
56 | select
57 | ol.product_id as p_id
58 | , p.name as product_name
59 | , ol.order_id as o_id
60 | , ol.qty
61 | , sum(ol.qty) over (
62 | partition by ol.product_id
63 | ) as p_qty
64 | from orderlines ol
65 | join products p
66 | on p.id = ol.product_id
67 | where ol.product_id in (4280, 6600)
68 | order by ol.product_id, ol.qty;
69 |
70 | -- Listing 11-4. Creating a running sum with ordering and windowing
71 |
72 | select
73 | ol.product_id as p_id
74 | , p.name as product_name
75 | , ol.order_id as o_id
76 | , ol.qty
77 | , sum(ol.qty) over (
78 | order by ol.qty
79 | rows between unbounded preceding
80 | and current row
81 | ) as r_qty
82 | from orderlines ol
83 | join products p
84 | on p.id = ol.product_id
85 | where ol.product_id in (4280, 6600)
86 | order by ol.qty;
87 |
88 | -- Ordering of analytic function and query does not need to be identical
89 |
90 | select
91 | ol.product_id as p_id
92 | , p.name as product_name
93 | , ol.order_id as o_id
94 | , ol.qty
95 | , sum(ol.qty) over (
96 | order by ol.qty
97 | rows between unbounded preceding
98 | and current row
99 | ) as r_qty
100 | from orderlines ol
101 | join products p
102 | on p.id = ol.product_id
103 | where ol.product_id in (4280, 6600)
104 | order by ol.product_id, ol.qty;
105 |
106 | -- Listing 11-5. Combining partitioning, ordering and windowing
107 |
108 | select
109 | ol.product_id as p_id
110 | , p.name as product_name
111 | , ol.order_id as o_id
112 | , ol.qty
113 | , sum(ol.qty) over (
114 | partition by ol.product_id
115 | order by ol.qty
116 | rows between unbounded preceding
117 | and current row
118 | ) as p_qty
119 | from orderlines ol
120 | join products p
121 | on p.id = ol.product_id
122 | where ol.product_id in (4280, 6600)
123 | order by ol.product_id, ol.qty;
124 |
125 | -- Listing 11-6. Window with all previous rows
126 |
127 | select
128 | ol.product_id as p_id
129 | , p.name as product_name
130 | , ol.order_id as o_id
131 | , ol.qty
132 | , sum(ol.qty) over (
133 | partition by ol.product_id
134 | order by ol.qty
135 | rows between unbounded preceding
136 | and 1 preceding
137 | ) as p_qty
138 | from orderlines ol
139 | join products p
140 | on p.id = ol.product_id
141 | where ol.product_id in (4280, 6600)
142 | order by ol.product_id, ol.qty;
143 |
144 | -- Window with reversed running sum
145 |
146 | select
147 | ol.product_id as p_id
148 | , p.name as product_name
149 | , ol.order_id as o_id
150 | , ol.qty
151 | , sum(ol.qty) over (
152 | partition by ol.product_id
153 | order by ol.qty
154 | rows between current row
155 | and unbounded following
156 | ) as p_qty
157 | from orderlines ol
158 | join products p
159 | on p.id = ol.product_id
160 | where ol.product_id in (4280, 6600)
161 | order by ol.product_id, ol.qty;
162 |
163 | -- Window of all rows yet to come
164 |
165 | select
166 | ol.product_id as p_id
167 | , p.name as product_name
168 | , ol.order_id as o_id
169 | , ol.qty
170 | , sum(ol.qty) over (
171 | partition by ol.product_id
172 | order by ol.qty
173 | rows between 1 following
174 | and unbounded following
175 | ) as p_qty
176 | from orderlines ol
177 | join products p
178 | on p.id = ol.product_id
179 | where ol.product_id in (4280, 6600)
180 | order by ol.product_id, ol.qty;
181 |
182 | -- Window bounded in both ends
183 |
184 | select
185 | ol.product_id as p_id
186 | , p.name as product_name
187 | , ol.order_id as o_id
188 | , ol.qty
189 | , sum(ol.qty) over (
190 | partition by ol.product_id
191 | order by ol.qty
192 | rows between 1 preceding
193 | and 1 following
194 | ) as p_qty
195 | from orderlines ol
196 | join products p
197 | on p.id = ol.product_id
198 | where ol.product_id in (4280, 6600)
199 | order by ol.product_id, ol.qty;
200 |
201 | -- Window unbounded in both ends
202 |
203 | select
204 | ol.product_id as p_id
205 | , p.name as product_name
206 | , ol.order_id as o_id
207 | , ol.qty
208 | , sum(ol.qty) over (
209 | partition by ol.product_id
210 | order by ol.qty
211 | rows between unbounded preceding
212 | and unbounded following
213 | ) as p_qty
214 | from orderlines ol
215 | join products p
216 | on p.id = ol.product_id
217 | where ol.product_id in (4280, 6600)
218 | order by ol.product_id, ol.qty;
219 |
220 | -- Listing 11-7. Range window based on qty value
221 |
222 | select
223 | ol.product_id as p_id
224 | , p.name as product_name
225 | , ol.order_id as o_id
226 | , ol.qty
227 | , sum(ol.qty) over (
228 | partition by ol.product_id
229 | order by ol.qty
230 | range between 20 preceding
231 | and 20 following
232 | ) as p_qty
233 | from orderlines ol
234 | join products p
235 | on p.id = ol.product_id
236 | where ol.product_id in (4280, 6600)
237 | order by ol.product_id, ol.qty;
238 |
239 | -- Window does not have to include current row value
240 |
241 | select
242 | ol.product_id as p_id
243 | , p.name as product_name
244 | , ol.order_id as o_id
245 | , ol.qty
246 | , sum(ol.qty) over (
247 | partition by ol.product_id
248 | order by ol.qty
249 | range between 5 following
250 | and 25 following
251 | ) as p_qty
252 | from orderlines ol
253 | join products p
254 | on p.id = ol.product_id
255 | where ol.product_id in (4280, 6600)
256 | order by ol.product_id, ol.qty;
257 |
258 | -- Running sum in range window includes following rows in case of ties
259 |
260 | select
261 | ol.product_id as p_id
262 | , p.name as product_name
263 | , ol.order_id as o_id
264 | , ol.qty
265 | , sum(ol.qty) over (
266 | partition by ol.product_id
267 | order by ol.qty
268 | range between unbounded preceding
269 | and current row
270 | ) as p_qty
271 | from orderlines ol
272 | join products p
273 | on p.id = ol.product_id
274 | where ol.product_id in (4280, 6600)
275 | order by ol.product_id, ol.qty;
276 |
277 | -- Listing 11-8. Comparing running sum with default, range and rows window
278 |
279 | select
280 | ol.product_id as p_id
281 | , p.name as product_name
282 | , ol.order_id as o_id
283 | , ol.qty
284 | , sum(ol.qty) over (
285 | partition by ol.product_id
286 | order by ol.qty
287 | /* no window - rely on default */
288 | ) as def_q
289 | , sum(ol.qty) over (
290 | partition by ol.product_id
291 | order by ol.qty
292 | range between unbounded preceding
293 | and current row
294 | ) as range_q
295 | , sum(ol.qty) over (
296 | partition by ol.product_id
297 | order by ol.qty
298 | rows between unbounded preceding
299 | and current row
300 | ) as rows_q
301 | from orderlines ol
302 | join products p
303 | on p.id = ol.product_id
304 | where ol.product_id in (4280, 6600)
305 | order by ol.product_id, ol.qty;
306 |
307 | -- Listing 11-9. A best practice for a running sum
308 |
309 | select
310 | ol.product_id as p_id
311 | , p.name as product_name
312 | , ol.order_id as o_id
313 | , ol.qty
314 | , sum(ol.qty) over (
315 | partition by ol.product_id
316 | order by ol.qty, ol.order_id
317 | rows between unbounded preceding
318 | and current row
319 | ) as p_qty
320 | from orderlines ol
321 | join products p
322 | on p.id = ol.product_id
323 | where ol.product_id in (4280, 6600)
324 | order by ol.product_id, ol.qty, ol.order_id;
325 |
326 | /* ***************************************************** */
327 |
--------------------------------------------------------------------------------
/Chapter 12/ch12_answering_top_n_questions.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch12_answering_top_n_questions.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 12
10 | Answering Top-N Questions
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | /* -----------------------------------------------------
24 | Chapter 12 example code
25 | ----------------------------------------------------- */
26 |
27 | -- Listing 12-2. A view of the total sales data
28 |
29 | select product_name, total_qty
30 | from total_sales
31 | order by total_qty desc;
32 |
33 | -- A view of the yearly sales data
34 |
35 | select yr, product_name, yr_qty
36 | from yearly_sales
37 | order by yr, yr_qty desc;
38 |
39 | -- Listing 12-3. A view of the yearly sales data
40 | -- (manually formatted, not ansiconsole)
41 |
42 | set pagesize 60
43 | set linesize 60
44 | set sqlformat
45 | column rn format 99
46 | column 2016_prod format a9
47 | column 2016 format 999
48 | column 2017_prod format a9
49 | column 2017 format 999
50 | column 2018_prod format a9
51 | column 2018 format 999
52 |
53 | select *
54 | from (
55 | select
56 | yr, product_name, yr_qty
57 | , row_number() over (
58 | partition by yr
59 | order by yr_qty desc
60 | ) as rn
61 | from yearly_sales
62 | )
63 | pivot (
64 | max(product_name) as prod
65 | , max(yr_qty)
66 | for yr in (
67 | 2016, 2017, 2018
68 | )
69 | )
70 | order by rn;
71 |
72 | set pagesize 60
73 | set linesize 60
74 | set sqlformat ansiconsole
75 |
76 | -- Listing 12-4. Top-3 using inline view and filter on rownum
77 |
78 | select *
79 | from (
80 | select product_name, total_qty
81 | from total_sales
82 | order by total_qty desc
83 | )
84 | where rownum <= 3;
85 |
86 | -- Listing 12-5. Top-3 using inline view and filter on row_number()
87 |
88 | select *
89 | from (
90 | select
91 | product_name, total_qty
92 | , row_number() over (order by total_qty desc) as ranking
93 | from total_sales
94 | )
95 | where ranking <= 3
96 | order by ranking;
97 |
98 | -- Listing 12-6. Comparison of the three analytic ranking functions
99 |
100 | select
101 | product_name, total_qty
102 | , row_number() over (order by total_qty desc) as rn
103 | , rank() over (order by total_qty desc) as rnk
104 | , dense_rank() over (order by total_qty desc) as dr
105 | from total_sales
106 | order by total_qty desc;
107 |
108 | -- Changing ranking function of Listing 12-5 to rank()
109 |
110 | select *
111 | from (
112 | select
113 | product_name, total_qty
114 | , rank() over (order by total_qty desc) as ranking
115 | from total_sales
116 | )
117 | where ranking <= 3
118 | order by ranking;
119 |
120 | -- Changing ranking function of Listing 12-5 to dense_rank()
121 |
122 | select *
123 | from (
124 | select
125 | product_name, total_qty
126 | , dense_rank() over (order by total_qty desc) as ranking
127 | from total_sales
128 | )
129 | where ranking <= 3
130 | order by ranking;
131 |
132 | -- Listing 12-7. Fetching only the first three rows
133 |
134 | select product_name, total_qty
135 | from total_sales
136 | order by total_qty desc
137 | fetch first 3 rows only;
138 |
139 | -- Fetching the first three rows with ties
140 |
141 | select product_name, total_qty
142 | from total_sales
143 | order by total_qty desc
144 | fetch first 3 rows with ties;
145 |
146 | -- Listing 12-8. Comparison of analytic functions for 2018 sales
147 |
148 | select
149 | product_name, yr_qty
150 | , row_number() over (order by yr_qty desc) as rn
151 | , rank() over (order by yr_qty desc) as rnk
152 | , dense_rank() over (order by yr_qty desc) as dr
153 | from yearly_sales
154 | where yr = 2018
155 | order by yr_qty desc
156 | fetch first 5 rows only;
157 |
158 | -- Listing 12-9. Fetching first three rows for 2018
159 |
160 | select product_name, yr_qty
161 | from yearly_sales
162 | where yr = 2018
163 | order by yr_qty desc
164 | fetch first 3 rows only;
165 |
166 | -- Making the output deterministic by adding a unique tie-breaker column to the order by
167 |
168 | select product_name, yr_qty
169 | from yearly_sales
170 | where yr = 2018
171 | order by yr_qty desc, product_id
172 | fetch first 3 rows only;
173 |
174 | -- Fetching with ties
175 |
176 | select product_name, yr_qty
177 | from yearly_sales
178 | where yr = 2018
179 | order by yr_qty desc
180 | fetch first 3 rows with ties;
181 |
182 | -- Combining a unique (tie-breaking) order by with "with ties" makes no sense - no ties can occur
183 |
184 | select product_name, yr_qty
185 | from yearly_sales
186 | where yr = 2018
187 | order by yr_qty desc, product_id
188 | fetch first 3 rows with ties;
189 |
190 | -- Comparison in 2017 (copy of Listing 12-8)
191 |
192 | select
193 | product_name, yr_qty
194 | , row_number() over (order by yr_qty desc) as rn
195 | , rank() over (order by yr_qty desc) as rnk
196 | , dense_rank() over (order by yr_qty desc) as dr
197 | from yearly_sales
198 | where yr = 2017
199 | order by yr_qty desc
200 | fetch first 5 rows only;
201 |
202 | -- Listing 12-10. Fetching with ties for 2017
203 |
204 | select product_name, yr_qty
205 | from yearly_sales
206 | where yr = 2017
207 | order by yr_qty desc
208 | fetch first 3 rows with ties;
209 |
210 | -- Listing 12-11. Using dense_rank for what fetch first cannot do
211 |
212 | select *
213 | from (
214 | select
215 | product_name, yr_qty
216 | , dense_rank() over (order by yr_qty desc) as ranking
217 | from yearly_sales
218 | where yr = 2017
219 | )
220 | where ranking <= 3
221 | order by ranking;
222 |
223 | -- Listing 12-12. Ranking with row_number within each year
224 |
225 | select *
226 | from (
227 | select
228 | yr, product_name, yr_qty
229 | , row_number() over (
230 | partition by yr
231 | order by yr_qty desc
232 | ) as ranking
233 | from yearly_sales
234 | )
235 | where ranking <= 3
236 | order by yr, ranking;
237 |
238 | -- Changing ranking function to rank()
239 |
240 | select *
241 | from (
242 | select
243 | yr, product_name, yr_qty
244 | , rank() over (
245 | partition by yr
246 | order by yr_qty desc
247 | ) as ranking
248 | from yearly_sales
249 | )
250 | where ranking <= 3
251 | order by yr, ranking;
252 |
253 | -- Changing ranking function to dense_rank()
254 |
255 | select *
256 | from (
257 | select
258 | yr, product_name, yr_qty
259 | , dense_rank() over (
260 | partition by yr
261 | order by yr_qty desc
262 | ) as ranking
263 | from yearly_sales
264 | )
265 | where ranking <= 3
266 | order by yr, ranking;
267 |
268 | -- Listing 12-13. Using fetch first in a laterally joined inline view
269 |
270 | select top_sales.*
271 | from (
272 | select 2016 as yr from dual union all
273 | select 2017 as yr from dual union all
274 | select 2018 as yr from dual
275 | ) years
276 | cross join lateral (
277 | select yr, product_name, yr_qty
278 | from yearly_sales
279 | where yearly_sales.yr = years.yr
280 | order by yr_qty desc
281 | fetch first 3 rows with ties
282 | ) top_sales;
283 |
284 | /* ***************************************************** */
285 |
--------------------------------------------------------------------------------
/Chapter 13/ch13_ordered_subsets_with_rolling_sums.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Apress/practical-oracle-sql/5bbc0d14e9d0c165156e194bb171c2a2d5c5729f/Chapter 13/ch13_ordered_subsets_with_rolling_sums.sql
--------------------------------------------------------------------------------
/Chapter 14/ch14_analyzing_activity_logs_with_lead.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Apress/practical-oracle-sql/5bbc0d14e9d0c165156e194bb171c2a2d5c5729f/Chapter 14/ch14_analyzing_activity_logs_with_lead.sql
--------------------------------------------------------------------------------
/Chapter 15/ch15_forecasting_with_linear_regression.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch15_forecasting_with_linear_regression.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 15
10 | Forecasting with Linear Regression
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | -- Unlike most other chapters, this chapter manually formats
20 | -- columns instead of using sqlformat ansiconsole
21 |
set pagesize 100
set linesize 100
-- no argument: per the note above, this chapter uses the manual COLUMN
-- formats below rather than sqlformat ansiconsole
set sqlformat
alter session set nls_date_format = 'YYYY-MM';
alter session set nls_numeric_characters = '.,';

-- one COLUMN format per measure produced by the queries in this chapter
column product_id format a4
column mth format a7
column ts format 99
column yr format 9999
column mthno format 9999
column qty format 999
column cma format 99D9
column s format 0D9999
column des format 999D9
column t format 99D9
column forecast format 999D9
39 |
40 | /* -----------------------------------------------------
41 | Chapter 15 example code
42 | ----------------------------------------------------- */
43 |
44 | -- Listing 15-1. The two products for showing forecasting
45 |
-- The two beers used throughout the forecasting examples.
select p.id, p.name
from products p
where p.id = 4160
   or p.id = 7790;
49 |
50 | -- The sales of 2016-2018 for the two beers pivoted to two columns
51 |
-- Pivots monthly sales of the two products into one column per product
-- (only months that exist in monthly_sales appear as rows).
select *
from (
   select product_id, mth, qty
   from monthly_sales
   where product_id in (4160, 7790)
)
pivot (
   sum(qty)
   for product_id in (
      4160 as reindeer_fuel
    , 7790 as summer_in_india
   )
)
order by mth;
66 |
67 | -- Listing 15-2. Building time series 2016-2019 for the two beers
68 |
-- Builds a gap-free 48-month time series (2016-2019) per product using a
-- partitioned outer join: the "partition by (ms.product_id)" clause outer
-- joins the generated months to EACH product separately, so months with
-- no sales row get qty = null instead of disappearing.
select
   ms.product_id
 , mths.mth
 , mths.ts
 , extract(year from mths.mth) as yr
 , extract(month from mths.mth) as mthno
 , ms.qty
from (
   select
      add_months(date '2016-01-01', level - 1) as mth
    , level as ts --time series
   from dual
   connect by level <= 48
) mths
left outer join (
   select product_id, mth, qty
   from monthly_sales
   where product_id in (4160, 7790)
) ms
   partition by (ms.product_id)
   on ms.mth = mths.mth
order by ms.product_id, mths.mth;
91 |
92 | -- Listing 15-3. Calculating centered moving average
93 |
-- s1: gap-free 2016-2019 time series per product (see Listing 15-2).
with s1 as (
   select
      ms.product_id
    , mths.mth
    , mths.ts
    , extract(year from mths.mth) as yr
    , extract(month from mths.mth) as mthno
    , ms.qty
   from (
      select
         add_months(date '2016-01-01', level - 1) as mth
       , level as ts --time series
      from dual
      connect by level <= 48
   ) mths
   left outer join (
      select product_id, mth, qty
      from monthly_sales
      where product_id in (4160, 7790)
   ) ms
      partition by (ms.product_id)
      on ms.mth = mths.mth
)
-- cma: average of two 12-month windows shifted by one month; averaging
-- them centers the 12-month moving average on the current month.
-- Only defined for ts 7..30, where both windows are fully populated.
select
   product_id, mth, ts, yr, mthno, qty
 , case
      when ts between 7 and 30 then
         (nvl(avg(qty) over (
            partition by product_id
            order by ts
            rows between 5 preceding and 6 following
         ), 0) + nvl(avg(qty) over (
            partition by product_id
            order by ts
            rows between 6 preceding and 5 following
         ), 0)) / 2
      else
         null
   end as cma -- centered moving average
from s1
order by product_id, mth;
135 |
136 | -- Listing 15-4. Calculating seasonality factor
137 |
-- s1: gap-free 2016-2019 time series per product (see Listing 15-2).
with s1 as (
   select
      ms.product_id
    , mths.mth
    , mths.ts
    , extract(year from mths.mth) as yr
    , extract(month from mths.mth) as mthno
    , ms.qty
   from (
      select
         add_months(date '2016-01-01', level - 1) as mth
       , level as ts --time series
      from dual
      connect by level <= 48
   ) mths
   left outer join (
      select product_id, mth, qty
      from monthly_sales
      where product_id in (4160, 7790)
   ) ms
      partition by (ms.product_id)
      on ms.mth = mths.mth
-- s2: centered moving average for ts 7..30 (see Listing 15-3).
), s2 as (
   select
      product_id, mth, ts, yr, mthno, qty
    , case
         when ts between 7 and 30 then
            (nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 5 preceding and 6 following
            ), 0) + nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 6 preceding and 5 following
            ), 0)) / 2
         else
            null
      end as cma -- centered moving average
   from s1
)
-- s: per calendar month (mthno), the average ratio of actual qty to cma.
-- Zero qty is replaced by 0.0001 to avoid a zero ratio; nullif guards
-- against division by a zero cma; rows where cma is null are ignored
-- by avg(), and nvl defaults the factor to 0 when no ratio exists.
select
   product_id, mth, ts, yr, mthno, qty, cma
 , nvl(avg(
      case qty
         when 0 then 0.0001
         else qty
      end / nullif(cma, 0)
   ) over (
      partition by product_id, mthno
   ),0) as s -- seasonality
from s2
order by product_id, mth;
191 |
192 | -- Listing 15-5. Deseasonalizing sales data
193 |
-- s1: gap-free 2016-2019 time series per product (see Listing 15-2).
with s1 as (
   select
      ms.product_id
    , mths.mth
    , mths.ts
    , extract(year from mths.mth) as yr
    , extract(month from mths.mth) as mthno
    , ms.qty
   from (
      select
         add_months(date '2016-01-01', level - 1) as mth
       , level as ts --time series
      from dual
      connect by level <= 48
   ) mths
   left outer join (
      select product_id, mth, qty
      from monthly_sales
      where product_id in (4160, 7790)
   ) ms
      partition by (ms.product_id)
      on ms.mth = mths.mth
-- s2: centered moving average for ts 7..30 (see Listing 15-3).
), s2 as (
   select
      product_id, mth, ts, yr, mthno, qty
    , case
         when ts between 7 and 30 then
            (nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 5 preceding and 6 following
            ), 0) + nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 6 preceding and 5 following
            ), 0)) / 2
         else
            null
      end as cma -- centered moving average
   from s1
-- s3: seasonality factor per product and calendar month (Listing 15-4).
), s3 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma
    , nvl(avg(
         case qty
            when 0 then 0.0001
            else qty
         end / nullif(cma, 0)
      ) over (
         partition by product_id, mthno
      ), 0) as s -- seasonality
   from s2
)
-- des: actual sales (ts <= 36, i.e. 2016-2018) divided by the
-- seasonality factor, removing the seasonal swing before trend fitting.
select
   product_id, mth, ts, yr, mthno, qty, cma, s
 , case when ts <= 36 then
      nvl(
         case qty
            when 0 then 0.0001
            else qty
         end / nullif(s, 0)
      , 0)
   end as des -- deseasonalized
from s3
order by product_id, mth;
259 |
260 | -- Listing 15-6. Calculating trend line
261 |
-- s1: gap-free 2016-2019 time series per product (see Listing 15-2).
with s1 as (
   select
      ms.product_id
    , mths.mth
    , mths.ts
    , extract(year from mths.mth) as yr
    , extract(month from mths.mth) as mthno
    , ms.qty
   from (
      select
         add_months(date '2016-01-01', level - 1) as mth
       , level as ts --time series
      from dual
      connect by level <= 48
   ) mths
   left outer join (
      select product_id, mth, qty
      from monthly_sales
      where product_id in (4160, 7790)
   ) ms
      partition by (ms.product_id)
      on ms.mth = mths.mth
-- s2: centered moving average for ts 7..30 (see Listing 15-3).
), s2 as (
   select
      product_id, mth, ts, yr, mthno, qty
    , case
         when ts between 7 and 30 then
            (nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 5 preceding and 6 following
            ), 0) + nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 6 preceding and 5 following
            ), 0)) / 2
         else
            null
      end as cma -- centered moving average
   from s1
-- s3: seasonality factor per product and calendar month (Listing 15-4).
), s3 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma
    , nvl(avg(
         case qty
            when 0 then 0.0001
            else qty
         end / nullif(cma, 0)
      ) over (
         partition by product_id, mthno
      ), 0) as s -- seasonality
   from s2
-- s4: deseasonalized actuals for ts <= 36 (Listing 15-5).
), s4 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma, s
    , case when ts <= 36 then
         nvl(
            case qty
               when 0 then 0.0001
               else qty
            end / nullif(s, 0)
         , 0)
      end as des -- deseasonalized
   from s3
)
-- t: linear regression of des over ts (intercept + ts * slope) fitted
-- per product; rows with null des are ignored by the regr_* functions,
-- so the fitted line extends across all 48 months including 2019.
select
   product_id, mth, ts, yr, mthno, qty, cma, s, des
 , regr_intercept(des, ts) over (
      partition by product_id
   ) + ts * regr_slope(des, ts) over (
      partition by product_id
   ) as t -- trend
from s4
order by product_id, mth;
336 |
337 | -- Listing 15-7. Reseasonalizing trend => forecast
338 |
-- s1: gap-free 2016-2019 time series per product (see Listing 15-2).
with s1 as (
   select
      ms.product_id
    , mths.mth
    , mths.ts
    , extract(year from mths.mth) as yr
    , extract(month from mths.mth) as mthno
    , ms.qty
   from (
      select
         add_months(date '2016-01-01', level - 1) as mth
       , level as ts --time series
      from dual
      connect by level <= 48
   ) mths
   left outer join (
      select product_id, mth, qty
      from monthly_sales
      where product_id in (4160, 7790)
   ) ms
      partition by (ms.product_id)
      on ms.mth = mths.mth
-- s2: centered moving average for ts 7..30 (see Listing 15-3).
), s2 as (
   select
      product_id, mth, ts, yr, mthno, qty
    , case
         when ts between 7 and 30 then
            (nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 5 preceding and 6 following
            ), 0) + nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 6 preceding and 5 following
            ), 0)) / 2
         else
            null
      end as cma -- centered moving average
   from s1
-- s3: seasonality factor per product and calendar month (Listing 15-4).
), s3 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma
    , nvl(avg(
         case qty
            when 0 then 0.0001
            else qty
         end / nullif(cma, 0)
      ) over (
         partition by product_id, mthno
      ), 0) as s -- seasonality
   from s2
-- s4: deseasonalized actuals for ts <= 36 (Listing 15-5).
), s4 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma, s
    , case when ts <= 36 then
         nvl(
            case qty
               when 0 then 0.0001
               else qty
            end / nullif(s, 0)
         , 0)
      end as des -- deseasonalized
   from s3
-- s5: linear trend line fitted per product (Listing 15-6).
), s5 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma, s, des
    , regr_intercept(des, ts) over (
         partition by product_id
      ) + ts * regr_slope(des, ts) over (
         partition by product_id
      ) as t -- trend
   from s4
)
-- forecast: the trend multiplied back by the seasonality factor,
-- re-applying the seasonal swing to the straight trend line.
select
   product_id, mth, ts, yr, mthno, qty, cma, s, des
 , t * s as forecast --reseasonalized
from s5
order by product_id, mth;
418 |
419 | -- Listing 15-8. Selecting actual and forecast
420 |
-- s1: gap-free 2016-2019 time series per product (see Listing 15-2).
with s1 as (
   select
      ms.product_id
    , mths.mth
    , mths.ts
    , extract(year from mths.mth) as yr
    , extract(month from mths.mth) as mthno
    , ms.qty
   from (
      select
         add_months(date '2016-01-01', level - 1) as mth
       , level as ts --time series
      from dual
      connect by level <= 48
   ) mths
   left outer join (
      select product_id, mth, qty
      from monthly_sales
      where product_id in (4160, 7790)
   ) ms
      partition by (ms.product_id)
      on ms.mth = mths.mth
-- s2: centered moving average for ts 7..30 (see Listing 15-3).
), s2 as (
   select
      product_id, mth, ts, yr, mthno, qty
    , case
         when ts between 7 and 30 then
            (nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 5 preceding and 6 following
            ), 0) + nvl(avg(qty) over (
               partition by product_id
               order by ts
               rows between 6 preceding and 5 following
            ), 0)) / 2
         else
            null
      end as cma -- centered moving average
   from s1
-- s3: seasonality factor per product and calendar month (Listing 15-4).
), s3 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma
    , nvl(avg(
         case qty
            when 0 then 0.0001
            else qty
         end / nullif(cma, 0)
      ) over (
         partition by product_id, mthno
      ), 0) as s -- seasonality
   from s2
-- s4: deseasonalized actuals for ts <= 36 (Listing 15-5).
), s4 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma, s
    , case when ts <= 36 then
         nvl(
            case qty
               when 0 then 0.0001
               else qty
            end / nullif(s, 0)
         , 0)
      end as des -- deseasonalized
   from s3
-- s5: linear trend line fitted per product (Listing 15-6).
), s5 as (
   select
      product_id, mth, ts, yr, mthno, qty, cma, s, des
    , regr_intercept(des, ts) over (
         partition by product_id
      ) + ts * regr_slope(des, ts) over (
         partition by product_id
      ) as t -- trend
   from s4
)
-- Final result: actual sales for 2016-2018 (ts <= 36), rounded
-- reseasonalized forecast for 2019, labeled by the type column.
select
   product_id
 , mth
 , case
      when ts <= 36 then qty
      else round(t * s)
   end as qty
 , case
      when ts <= 36 then 'Actual'
      else 'Forecast'
   end as type
from s5
order by product_id, mth;
508 |
509 | /* ***************************************************** */
510 |
--------------------------------------------------------------------------------
/Chapter 16/ch16_rolling_sums_to_forecast_reaching_minimums.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch16_rolling_sums_to_forecast_reaching_minimums.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 16
10 | Rolling Sums to Forecast Reaching Minimums
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
-- sqlcl output setup: ansiconsole formatting and ISO date display
set pagesize 80
set linesize 80
set sqlformat ansiconsole

alter session set nls_date_format = 'YYYY-MM-DD';
24 |
25 | /* -----------------------------------------------------
26 | Chapter 16 example code
27 | ----------------------------------------------------- */
28 |
29 | -- Listing 16-3. The inventory totals for two products
30 |
-- Current inventory totals for the two products used in this chapter.
select inv.product_id, prod.name, inv.qty
from products prod
join inventory_totals inv
   on inv.product_id = prod.id
where inv.product_id in (6520, 6600)
order by inv.product_id;
37 |
38 | -- Listing 16-4. The 2019 monthly budget for the two beers
39 |
-- The 2019 monthly sales budget for the two products.
select budget.product_id, budget.mth, budget.qty
from monthly_budget budget
where budget.mth >= date '2019-01-01'
and budget.product_id in (6520, 6600)
order by budget.product_id, budget.mth;
45 |
46 | -- Listing 16-5. The current monthly order quantities
47 |
-- Quantities already ordered by customers, per product and month.
select ord.product_id, ord.mth, ord.qty
from monthly_orders ord
where ord.product_id in (6520, 6600)
order by ord.product_id, ord.mth;
52 |
53 | -- Listing 16-6. Accumulating quantities
54 |
-- Expected monthly demand is the greater of budget and actual orders;
-- acc_qty is the running total of that demand per product. The explicit
-- ROWS frame avoids the default RANGE frame's tie behavior.
select
   mb.product_id as p_id, mb.mth
 , mb.qty b_qty, mo.qty o_qty
 , greatest(mb.qty, nvl(mo.qty, 0)) as qty
 , sum(greatest(mb.qty, nvl(mo.qty, 0))) over (
      partition by mb.product_id
      order by mb.mth
      rows between unbounded preceding and current row
   ) as acc_qty
from monthly_budget mb
left outer join monthly_orders mo
   on mo.product_id = mb.product_id
   and mo.mth = mb.mth
where mb.product_id in (6520, 6600)
and mb.mth >= date '2019-01-01'
order by mb.product_id, mb.mth;
71 |
72 | -- Listing 16-7. Dwindling inventory
73 |
-- Inventory at the start and end of each month: the starting inventory
-- subtracts demand of all PRIOR months (frame ends at 1 preceding;
-- nvl covers the first month, where the frame is empty), the ending
-- inventory also subtracts the current month. greatest(..., 0) floors
-- both at zero once the inventory is exhausted.
select
   mb.product_id as p_id, mb.mth
 , greatest(mb.qty, nvl(mo.qty, 0)) as qty
 , greatest(
      it.qty - nvl(sum(
         greatest(mb.qty, nvl(mo.qty, 0))
      ) over (
         partition by mb.product_id
         order by mb.mth
         rows between unbounded preceding and 1 preceding
      ), 0)
   , 0
   ) as inv_begin
 , greatest(
      it.qty - sum(
         greatest(mb.qty, nvl(mo.qty, 0))
      ) over (
         partition by mb.product_id
         order by mb.mth
         rows between unbounded preceding and current row
      )
   , 0
   ) as inv_end
from monthly_budget mb
left outer join monthly_orders mo
   on mo.product_id = mb.product_id
   and mo.mth = mb.mth
join inventory_totals it
   on it.product_id = mb.product_id
where mb.product_id in (6520, 6600)
and mb.mth >= date '2019-01-01'
order by mb.product_id, mb.mth;
106 |
107 | -- Listing 16-8. Estimating when zero is reached
108 |
-- Finds the month where inventory hits zero (inv_begin > 0, inv_end = 0)
-- and interpolates the day linearly: the fraction of the month's demand
-- covered by inv_begin, scaled to the number of days in the month
-- (add_months(mth, 1) - 1 - mth) and added to the month's first day.
select
   product_id as p_id, mth, inv_begin, inv_end
 , trunc(
      mth + numtodsinterval(
         (add_months(mth, 1) - 1 - mth) * inv_begin / qty
       , 'day'
      )
   ) as zero_day
from (
   -- same begin/end inventory calculation as Listing 16-7
   select
      mb.product_id, mb.mth
    , greatest(mb.qty, nvl(mo.qty, 0)) as qty
    , greatest(
         it.qty - nvl(sum(
            greatest(mb.qty, nvl(mo.qty, 0))
         ) over (
            partition by mb.product_id
            order by mb.mth
            rows between unbounded preceding and 1 preceding
         ), 0)
      , 0
      ) as inv_begin
    , greatest(
         it.qty - sum(
            greatest(mb.qty, nvl(mo.qty, 0))
         ) over (
            partition by mb.product_id
            order by mb.mth
            rows between unbounded preceding and current row
         )
      , 0
      ) as inv_end
   from monthly_budget mb
   left outer join monthly_orders mo
      on mo.product_id = mb.product_id
      and mo.mth = mb.mth
   join inventory_totals it
      on it.product_id = mb.product_id
   where mb.product_id in (6520, 6600)
   and mb.mth >= date '2019-01-01'
)
where inv_begin > 0 and inv_end = 0
order by product_id;
152 |
153 | -- Listing 16-9. Product minimum restocking parameters
154 |
-- Restocking parameters: minimum allowed inventory and purchase batch size.
select pmin.product_id, pmin.qty_minimum, pmin.qty_purchase
from product_minimums pmin
where pmin.product_id = 6520
   or pmin.product_id = 6600
order by pmin.product_id;
159 |
160 | -- Listing 16-10. Restocking when a minimum is reached
161 |
-- Recursive simulation of inventory month by month: the anchor seeds
-- December 2018 with the current inventory total, and each recursive
-- step advances one month, subtracting demand and adding a purchase
-- batch whenever the inventory would otherwise drop below the minimum.
with mb_recur(
   product_id, mth, qty, inv_begin, date_purch
 , p_qty, inv_end, qty_minimum, qty_purchase
) as (
   select
      it.product_id
    , date '2018-12-01' as mth
    , 0 as qty
    , 0 as inv_begin
    , cast(null as date) as date_purch
    , 0 as p_qty
    , it.qty as inv_end
    , pm.qty_minimum
    , pm.qty_purchase
   from inventory_totals it
   join product_minimums pm
      on pm.product_id = it.product_id
   where it.product_id in (6520, 6600)
   union all
   select
      mb.product_id
    , mb.mth
    , greatest(mb.qty, nvl(mo.qty, 0)) as qty
    , mbr.inv_end as inv_begin
      -- purchase date: linear interpolation of the day within the month
      -- where inventory crosses the minimum (same technique as 16-8)
    , case
         when mbr.inv_end - greatest(mb.qty, nvl(mo.qty, 0))
                 < mbr.qty_minimum
         then
            trunc(
               mb.mth
               + numtodsinterval(
                    (add_months(mb.mth, 1) - 1 - mb.mth)
                       * (mbr.inv_end - mbr.qty_minimum)
                       / mb.qty
                  , 'day'
                 )
            )
      end as date_purch
      -- purchase quantity: one batch when the minimum would be breached
    , case
         when mbr.inv_end - greatest(mb.qty, nvl(mo.qty, 0))
                 < mbr.qty_minimum
         then mbr.qty_purchase
      end as p_qty
      -- new inventory: previous end minus demand plus any purchase
    , mbr.inv_end - greatest(mb.qty, nvl(mo.qty, 0))
      + case
           when mbr.inv_end - greatest(mb.qty, nvl(mo.qty, 0))
                   < mbr.qty_minimum
           then mbr.qty_purchase
           else 0
        end as inv_end
    , mbr.qty_minimum
    , mbr.qty_purchase
   from mb_recur mbr
   join monthly_budget mb
      on mb.product_id = mbr.product_id
      and mb.mth = add_months(mbr.mth, 1)
   left outer join monthly_orders mo
      on mo.product_id = mb.product_id
      and mo.mth = mb.mth
)
-- only the 2019 months where a purchase is needed
select
   product_id as p_id, mth, qty, inv_begin
 , date_purch, p_qty, inv_end
from mb_recur
where mth >= date '2019-01-01'
and p_qty is not null
order by product_id, mth;
229 |
230 | -- Listing 16-11. Restocking with model clause
231 |
-- Same restocking simulation as Listing 16-10, but with the MODEL
-- clause: rows are dimensioned 0..11 by month (rn) per product, and
-- the rules are evaluated in sequential order 12 times, each iteration
-- computing one month from the previous month's inv_end.
select
   product_id as p_id, mth, qty, inv_begin
 , date_purch, p_qty, inv_end
from (
   select *
   from monthly_budget mb
   left outer join monthly_orders mo
      on mo.product_id = mb.product_id
      and mo.mth = mb.mth
   join inventory_totals it
      on it.product_id = mb.product_id
   join product_minimums pm
      on pm.product_id = mb.product_id
   where mb.product_id in (6520, 6600)
   and mb.mth >= date '2019-01-01'
   model
      partition by (mb.product_id)
      dimension by (
         row_number() over (
            partition by mb.product_id order by mb.mth
         ) - 1 as rn
      )
      measures (
         mb.mth
       , greatest(mb.qty, nvl(mo.qty, 0)) as qty
       , 0 as inv_begin
       , cast(null as date) as date_purch
       , 0 as p_qty
       , 0 as inv_end
       , it.qty as inv_orig
       , pm.qty_minimum
       , pm.qty_purchase
      )
      rules sequential order iterate (12) (
         -- first iteration falls back to the original inventory total
         inv_begin[iteration_number]
            = nvl(inv_end[iteration_number-1], inv_orig[cv()])
       , p_qty[iteration_number]
            = case
                 when inv_begin[cv()] - qty[cv()]
                         < qty_minimum[cv()]
                 then qty_purchase[cv()]
              end
         -- interpolated day within the month where the minimum is hit
       , date_purch[iteration_number]
            = case
                 when p_qty[cv()] is not null
                 then
                    trunc(
                       mth[cv()]
                       + numtodsinterval(
                            (add_months(mth[cv()], 1) - 1 - mth[cv()])
                               * (inv_begin[cv()] - qty_minimum[cv()])
                               / qty[cv()]
                          , 'day'
                         )
                    )
              end
       , inv_end[iteration_number]
            = inv_begin[cv()] + nvl(p_qty[cv()], 0) - qty[cv()]
      )
)
where p_qty is not null
order by product_id, mth;
294 |
295 | /* ***************************************************** */
296 |
--------------------------------------------------------------------------------
/Chapter 17/ch17_up_and_down_patterns.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch17_up_and_down_patterns.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 17
10 | Row pattern matching on stock ticker data
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
-- sqlcl output setup: ansiconsole formatting and ISO date display
set pagesize 80
set linesize 80
set sqlformat ansiconsole
alter session set nls_date_format = 'YYYY-MM-DD';
23 |
24 | /* -----------------------------------------------------
25 | Chapter 17 example code
26 | ----------------------------------------------------- */
27 |
28 | -- Listing 17-1. Classifying the rows
29 |
-- Each match is a single row classified DOWN or UP. The first row of
-- each partition has no prev(price), so neither definition is true
-- there and that row is not part of any match.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   pattern (
      down | up
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
)
order by symbol, day;
48 |
49 | -- Changing to less-than-or-equal and greater-than-or-equal
50 |
-- With <= and >= a row with an unchanged price satisfies BOTH
-- definitions; DOWN wins because alternatives are tried left to right.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   pattern (
      down | up
   )
   define
      down as price <= prev(price)
    , up as price >= prev(price)
)
order by symbol, day;
69 |
70 | -- Using down, up and same
71 |
-- Three mutually exclusive classifications; unchanged prices are SAME.
-- The first row per partition still matches nothing (prev is null).
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   pattern (
      down | up | same
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, day;
91 |
92 | -- Adding undefined STRT to the pattern
93 |
-- STRT has no definition, so it is always true; placed LAST among the
-- alternatives it only catches rows the other three cannot classify
-- (the first row of each partition).
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   pattern (
      down | up | same | strt
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, day;
113 |
114 | -- Here not good with STRT in front in the pattern
115 |
-- Anti-example: with the always-true STRT as the FIRST alternative,
-- every row is classified STRT and the other definitions never apply.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   pattern (
      strt | down | up | same
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, day;
135 |
136 | -- Listing 17-2. Searching for V shapes
137 |
-- V shapes: one or more falling/flat rows followed by one or more
-- rising/flat rows, output row by row.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   pattern (
      (down | same)+ (up | same)+
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, day;
157 |
158 | -- Listing 17-3. Output a single row for each match
159 |
-- Same V-shape pattern, but condensed to one output row per match with
-- first/last day and the row count of the match.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , first(day) as first_day
    , last(day) as last_day
    , count(*) as days
   one row per match
   pattern (
      (down | same)+ (up | same)+
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, first_day;
180 |
181 | -- Adding STRT to the pattern
182 |
-- Prefixing the always-true STRT makes the match include the row at
-- the top of the V, so first_day moves one row earlier.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , first(day) as first_day
    , last(day) as last_day
    , count(*) as days
   one row per match
   pattern (
      strt (down | same)+ (up | same)+
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, first_day;
203 |
204 | -- Showing default AFTER MATCH SKIP PAST LAST ROW
205 |
-- Spelling out the default AFTER MATCH SKIP PAST LAST ROW: the search
-- resumes after the matched rows, so the row ending one V cannot also
-- start the next V.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , first(day) as first_day
    , last(day) as last_day
    , count(*) as days
   one row per match
   after match skip past last row
   pattern (
      strt (down | same)+ (up | same)+
   )
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, first_day;
227 |
228 | -- Using SKIP TO LAST together with SUBSET
229 |
-- SUBSET names the union of UP and SAME so SKIP TO LAST can resume at
-- the last rising/flat row, letting the end of one V start the next.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , first(day) as first_day
    , last(day) as last_day
    , count(*) as days
   one row per match
   after match skip to last up_or_same
   pattern (
      strt (down | same)+ (up | same)+
   )
   subset up_or_same = (up, same)
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, first_day;
252 |
253 | -- Listing 17-4. Simplified query utilizing
254 | -- how definitions are evaluated for patterns
255 |
-- Simplified V matching: <= and >= fold SAME into DOWN and UP, so no
-- SUBSET is needed and skipping can target UP directly.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , first(day) as first_day
    , last(day) as last_day
    , count(*) as days
   one row per match
   after match skip to last up
   pattern (
      strt down+ up+
   )
   define
      down as price <= prev(price)
    , up as price >= prev(price)
)
order by symbol, first_day;
276 |
277 | -- Listing 17-5. Seeing all rows of the simplified query
278 |
-- ALL ROWS PER MATCH view of the simplified query, showing how each
-- row of every V was classified.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   after match skip to last up
   pattern (
      strt down+ up+
   )
   define
      down as price <= prev(price)
    , up as price >= prev(price)
)
order by symbol, day;
298 |
299 | -- Listing 17-6. First attempt at finding W shapes
300 |
-- First W-shape attempt: two consecutive down+/up+ legs. Using <= and
-- >= lets flat stretches satisfy both legs, which can match too
-- eagerly (debugged in the next query).
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , first(day) as first_day
    , last(day) as last_day
    , count(*) as days
   one row per match
   after match skip to last up
   pattern (
      strt down+ up+ down+ up+
   )
   define
      down as price <= prev(price)
    , up as price >= prev(price)
)
order by symbol, first_day;
321 |
322 | -- Debugging with ALL ROWS PER MATCH
323 |
-- Debug view: ALL ROWS PER MATCH exposes which rows each W leg
-- consumed in the first attempt.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   after match skip to last up
   pattern (
      strt down+ up+ down+ up+
   )
   define
      down as price <= prev(price)
    , up as price >= prev(price)
)
order by symbol, day;
343 |
344 | -- Attempt with DOWN, UP and SAME
345 |
-- W attempt with explicit SAME alternatives in every leg; flat rows can
-- still be attributed to the wrong leg, motivating Listing 17-7.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   after match skip to last up_or_same
   pattern (
      strt (down | same)+ (up | same)+ (down | same)+ (up | same)+
   )
   subset up_or_same = (up, same)
   define
      down as price < prev(price)
    , up as price > prev(price)
    , same as price = prev(price)
)
order by symbol, day;
367 |
368 | -- Listing 17-7. More intelligent definitions for W shape matching
369 |
-- Smarter definitions: a flat row only continues a leg if its price
-- equals the last price already classified in that SAME leg
-- (last(down.price, 1) / last(up.price, 1)), so flat stretches stay
-- attached to the leg they belong to.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , classifier() as class
    , prev(price) as prev
   all rows per match
   after match skip to last up
   pattern (
      strt down+ up+ down+ up+
   )
   define
      down as price < prev(price)
           or ( price = prev(price)
                and price = last(down.price, 1)
              )
    , up as price > prev(price)
         or ( price = prev(price)
              and price = last(up.price , 1)
            )
)
order by symbol, day;
395 |
396 | -- Listing 17-8. Finding overlapping W shapes
397 |
-- Overlapping W shapes: AFTER MATCH SKIP TO FIRST UP restarts the scan
-- at the first rising row of the previous match, so the second valley
-- of one W can become the first valley of the next.
select *
from ticker
match_recognize (
   partition by symbol
   order by day
   measures
      match_number() as match
    , first(day) as first_day
    , last(day) as last_day
    , count(*) as days
   one row per match
   after match skip to first up
   pattern (
      strt down+ up+ down+ up+
   )
   define
      down as price < prev(price)
           or ( price = prev(price)
                and price = last(down.price, 1)
              )
    , up as price > prev(price)
         or ( price = prev(price)
              and price = last(up.price , 1)
            )
)
order by symbol, first_day;
424 |
425 | /* ***************************************************** */
426 |
--------------------------------------------------------------------------------
/Chapter 18/ch18_grouping_data_with_patterns.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch18_grouping_data_with_patterns.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 18
10 | Grouping Data with Patterns
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | alter session set nls_date_format = 'YYYY-MM-DD HH24:MI:SS';
24 |
25 | /* -----------------------------------------------------
26 | Chapter 18 example code
27 | ----------------------------------------------------- */
28 |
29 | -- Listing 18-1. Difference between value and row_number
30 |
31 | with ints(i) as (
32 | select 1 from dual union all
33 | select 2 from dual union all
34 | select 3 from dual union all
35 | select 6 from dual union all
36 | select 8 from dual union all
37 | select 9 from dual
38 | )
39 | select
40 | i
41 | , row_number() over (order by i) as rn
42 | , i - row_number() over (order by i) as diff  -- constant within a run of consecutive integers
43 | from ints
44 | order by i;
45 |
46 | -- Listing 18-2. Tabibitosan grouping
47 |
48 | with ints(i) as (
49 | select 1 from dual union all
50 | select 2 from dual union all
51 | select 3 from dual union all
52 | select 6 from dual union all
53 | select 8 from dual union all
54 | select 9 from dual
55 | )
56 | select
57 | min(i) as first_int
58 | , max(i) as last_int
59 | , count(*) as ints_in_grp
60 | from (
61 | select i, i - row_number() over (order by i) as diff  -- Tabibitosan: diff is constant per consecutive run
62 | from ints
63 | )
64 | group by diff  -- one group per consecutive run
65 | order by first_int;
66 |
67 | -- Listing 18-3. Same grouping with match_recognize
68 |
69 | with ints(i) as (
70 | select 1 from dual union all
71 | select 2 from dual union all
72 | select 3 from dual union all
73 | select 6 from dual union all
74 | select 8 from dual union all
75 | select 9 from dual
76 | )
77 | select first_int, last_int, ints_in_grp
78 | from ints
79 | match_recognize (
80 | order by i
81 | measures
82 | first(i) as first_int
83 | , last(i) as last_int
84 | , count(*) as ints_in_grp
85 | one row per match
86 | pattern (strt one_higher*)  -- any row starts a group, followed by zero or more consecutive successors
87 | define
88 | one_higher as i = prev(i) + 1
89 | )
90 | order by first_int;
91 |
92 | -- Listing 18-4. Server heartbeat as example of something other than integers
93 |
94 | select server, beat_time  -- raw heartbeat data used by the following listings
95 | from server_heartbeat
96 | order by server, beat_time;
97 |
98 | -- Listing 18-5. Tabibitosan adjusted to 5 minute intervals
99 |
100 | select
101 | server
102 | , min(beat_time) as first_beat
103 | , max(beat_time) as last_beat
104 | , count(*) as beats
105 | from (
106 | select
107 | server
108 | , beat_time
109 | , beat_time - interval '5' minute  -- subtract rn * 5 minutes: constant per unbroken beat sequence
110 | * row_number() over (
111 | partition by server
112 | order by beat_time
113 | ) as diff
114 | from server_heartbeat
115 | )
116 | group by server, diff
117 | order by server, first_beat;
118 |
119 | -- Listing 18-6. Same adjustment to match_recognize solution
120 |
121 | select server, first_beat, last_beat, beats
122 | from server_heartbeat
123 | match_recognize (
124 | partition by server
125 | order by beat_time
126 | measures
127 | first(beat_time) as first_beat
128 | , last(beat_time) as last_beat
129 | , count(*) as beats
130 | one row per match
131 | pattern (strt five_mins_later*)  -- a beat plus zero or more beats exactly 5 minutes apart
132 | define
133 | five_mins_later as
134 | beat_time = prev(beat_time) + interval '5' minute
135 | )
136 | order by server, first_beat;
137 |
138 | -- Allowing for "fuzzy" intervals
139 |
140 | select server, first_beat, last_beat, beats
141 | from server_heartbeat
142 | match_recognize (
143 | partition by server
144 | order by beat_time
145 | measures
146 | first(beat_time) as first_beat
147 | , last(beat_time) as last_beat
148 | , count(*) as beats
149 | one row per match
150 | pattern (strt five_mins_later*)
151 | define
152 | five_mins_later as
153 | beat_time between prev(beat_time) + interval '4' minute  -- tolerate 4-6 minutes of drift
154 | and prev(beat_time) + interval '6' minute
155 | )
156 | order by server, first_beat;
157 |
158 | -- Listing 18-7. Detecting gaps from consecutive grouping using lead function
159 |
160 | select
161 | server, last_beat, next_beat
162 | , round((next_beat - last_beat) * (24*60)) as gap_minutes  -- date difference in days converted to minutes
163 | from (
164 | select
165 | server
166 | , last_beat
167 | , lead(first_beat) over (  -- start of the following group = first beat after the gap
168 | partition by server order by first_beat
169 | ) as next_beat
170 | from (
171 | select server, first_beat, last_beat, beats
172 | from server_heartbeat
173 | match_recognize (
174 | partition by server
175 | order by beat_time
176 | measures
177 | first(beat_time) as first_beat
178 | , last(beat_time) as last_beat
179 | , count(*) as beats
180 | one row per match
181 | pattern (strt five_mins_later*)
182 | define
183 | five_mins_later as
184 | beat_time = prev(beat_time) + interval '5' minute
185 | )
186 | )
187 | )
188 | where next_beat is not null  -- the last group per server has no following gap
189 | order by server, last_beat;
190 |
191 | -- Listing 18-8. Detecting gaps directly in match_recognize
192 |
193 | select
194 | server, last_beat, next_beat
195 | , round((next_beat - last_beat) * (24*60)) as gap_minutes
196 | from server_heartbeat
197 | match_recognize (
198 | partition by server
199 | order by beat_time
200 | measures
201 | last(before_gap.beat_time) as last_beat
202 | , next_after_gap.beat_time as next_beat
203 | one row per match
204 | after match skip to last next_after_gap  -- the row after the gap starts the next match
205 | pattern (strt five_mins_later* next_after_gap)  -- a run up to and including the first row after a gap
206 | subset before_gap = (strt, five_mins_later)  -- union variable covering the rows before the gap
207 | define
208 | five_mins_later as
209 | beat_time = prev(beat_time) + interval '5' minute
210 | , next_after_gap as
211 | beat_time > prev(beat_time) + interval '5' minute
212 | )
213 | order by server, last_beat;
214 |
215 | -- Listing 18-9. Web page visit data
216 |
217 | select app_id, visit_time, client_ip, page_no
218 | from web_page_visits
219 | order by app_id, visit_time, client_ip;
220 |
221 | -- Listing 18-10. Data belongs to same group (session) as long as max 15 minutes between page visits
222 |
223 | select app_id, first_visit, last_visit, visits, client_ip
224 | from web_page_visits
225 | match_recognize (
226 | partition by app_id, client_ip
227 | order by visit_time
228 | measures
229 | first(visit_time) as first_visit
230 | , last(visit_time) as last_visit
231 | , count(*) as visits
232 | one row per match
233 | pattern (strt within_15_mins*)
234 | define
235 | within_15_mins as
236 | visit_time <= prev(visit_time) + interval '15' minute  -- looking behind at the previous visit
237 | )
238 | order by app_id, first_visit, client_ip;
239 |
240 | -- Reversing the logic to look ahead instead of looking behind
241 |
242 | select app_id, first_visit, last_visit, visits, client_ip
243 | from web_page_visits
244 | match_recognize (
245 | partition by app_id, client_ip
246 | order by visit_time
247 | measures
248 | first(visit_time) as first_visit
249 | , last(visit_time) as last_visit
250 | , count(*) as visits
251 | one row per match
252 | pattern (has_15_mins_to_next* last_time)  -- last_time is deliberately undefined, so it matches any row
253 | define
254 | has_15_mins_to_next as
255 | visit_time + interval '15' minute >= next(visit_time)  -- looking ahead at the following visit
256 | )
257 | order by app_id, first_visit, client_ip;
258 |
259 | -- Listing 18-11. Sessions max one hour long since first page visit
260 |
261 | select app_id, first_visit, last_visit, visits, client_ip
262 | from web_page_visits
263 | match_recognize (
264 | partition by app_id, client_ip
265 | order by visit_time
266 | measures
267 | first(visit_time) as first_visit
268 | , last(visit_time) as last_visit
269 | , count(*) as visits
270 | one row per match
271 | pattern (same_hour+)
272 | define
273 | same_hour as
274 | visit_time <= first(visit_time) + interval '1' hour  -- anchored to session start, not the previous row
275 | )
276 | order by app_id, first_visit, client_ip;
277 |
278 | /* ***************************************************** */
279 |
--------------------------------------------------------------------------------
/Chapter 19/ch19_merging_date_ranges.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch19_merging_date_ranges.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 19
10 | Merging Date Ranges
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | alter session set nls_date_format = 'YYYY-MM-DD';
24 |
25 | /* -----------------------------------------------------
26 | Chapter 19 example code
27 | ----------------------------------------------------- */
28 |
29 | -- Listing 19-2. The hire periods data
30 |
31 | select
32 | ehp.emp_id
33 | , ehp.name
34 | , ehp.start_date
35 | , ehp.end_date
36 | , ehp.title
37 | from emp_hire_periods_with_name ehp
38 | order by ehp.emp_id, ehp.start_date;
39 |
40 | -- Listing 19-4. Querying hire periods table as of a specific date
41 |
42 | select
43 | ehp.emp_id
44 | , e.name
45 | , ehp.start_date
46 | , ehp.end_date
47 | , ehp.title
48 | from emp_hire_periods
49 | as of period for employed_in date '2010-07-01'  -- temporal validity: only periods valid on this date
50 | ehp
51 | join employees e
52 | on e.id = ehp.emp_id
53 | order by ehp.emp_id, ehp.start_date;
54 |
55 | -- Querying as of another date
56 |
57 | select
58 | ehp.emp_id
59 | , e.name
60 | , ehp.start_date
61 | , ehp.end_date
62 | , ehp.title
63 | from emp_hire_periods
64 | as of period for employed_in date '2016-07-01'
65 | ehp
66 | join employees e
67 | on e.id = ehp.emp_id
68 | order by ehp.emp_id, ehp.start_date;
69 |
70 | -- Listing 19-5. Comparing start_date to end_date of the previous row
71 |
72 | select
73 | emp_id
74 | , name
75 | , start_date
76 | , end_date
77 | , jobs
78 | from emp_hire_periods_with_name
79 | match_recognize (
80 | partition by emp_id
81 | order by start_date, end_date
82 | measures
83 | max(name) as name  -- name is constant per emp_id; max() just picks it
84 | , first(start_date) as start_date
85 | , last(end_date) as end_date
86 | , count(*) as jobs
87 | pattern (
88 | strt adjoin_or_overlap*
89 | )
90 | define
91 | adjoin_or_overlap as
92 | start_date <= prev(end_date)  -- merge a row that starts before the previous row ends
93 | )
94 | order by emp_id, start_date;
95 |
96 | -- Trying to order by end_date first instead of start_date
97 |
98 | select
99 | emp_id
100 | , name
101 | , start_date
102 | , end_date
103 | , jobs
104 | from emp_hire_periods_with_name
105 | match_recognize (
106 | partition by emp_id
107 | order by end_date, start_date
108 | measures
109 | max(name) as name
110 | , first(start_date) as start_date
111 | , last(end_date) as end_date
112 | , count(*) as jobs
113 | pattern (
114 | strt adjoin_or_overlap*
115 | )
116 | define
117 | adjoin_or_overlap as
118 | start_date <= prev(end_date)  -- still only compares against the immediately previous row
119 | )
120 | order by emp_id, start_date;
121 |
122 | -- Attempting to compare start_date with the highest end_date so far
123 | -- This does not work and depending on which DB version and which client
124 | -- you can risk that your session crashes with one of these errors:
125 | -- ORA-03113: end-of-file on communication channel
126 | -- java.lang.NullPointerException
127 | -- Do not call this statement, it just illustrates a point
128 | /*
129 | select
130 | emp_id
131 | , name
132 | , start_date
133 | , end_date
134 | , jobs
135 | from emp_hire_periods_with_name
136 | match_recognize (
137 | partition by emp_id
138 | order by start_date, end_date
139 | measures
140 | max(name) as name
141 | , first(start_date) as start_date
142 | , max(end_date) as end_date
143 | , count(*) as jobs
144 | pattern (
145 | strt adjoin_or_overlap*
146 | )
147 | define
148 | adjoin_or_overlap as
149 | start_date <= max(end_date)
150 | )
151 | order by emp_id, start_date;
152 | */
153 |
154 | -- Listing 19-6. Comparing start_date of next row to highest end_date seen so far
155 |
156 | select
157 | emp_id
158 | , name
159 | , start_date
160 | , end_date
161 | , jobs
162 | from emp_hire_periods_with_name
163 | match_recognize (
164 | partition by emp_id
165 | order by start_date, end_date
166 | measures
167 | max(name) as name
168 | , first(start_date) as start_date
169 | , max(end_date) as end_date
170 | , count(*) as jobs
171 | pattern (
172 | adjoin_or_overlap* last_row  -- last_row is undefined, so it matches the final row of each group
173 | )
174 | define
175 | adjoin_or_overlap as
176 | next(start_date) <= max(end_date)  -- NEXT row's start vs the highest end_date seen so far
177 | )
178 | order by emp_id, start_date;
179 |
180 | -- Listing 19-7. Handling null=infinity for both start and end
181 |
182 | select
183 | emp_id
184 | , name
185 | , start_date
186 | , end_date
187 | , jobs
188 | from emp_hire_periods_with_name
189 | match_recognize (
190 | partition by emp_id
191 | order by start_date nulls first, end_date nulls last  -- null start/end sorted as -/+ infinity
192 | measures
193 | max(name) as name
194 | , first(start_date) as start_date
195 | , nullif(
196 | max(nvl(end_date, date '9999-12-31'))  -- null end_date treated as infinity ...
197 | , date '9999-12-31'
198 | ) as end_date  -- ... and turned back into null in the output
199 | , count(*) as jobs
200 | pattern (
201 | adjoin_or_overlap* last_row
202 | )
203 | define
204 | adjoin_or_overlap as
205 | nvl(next(start_date), date '-4712-01-01')  -- null start treated as minus infinity (earliest Oracle date)
206 | <= max(nvl(end_date, date '9999-12-31'))
207 | )
208 | order by emp_id, start_date;
209 |
210 | /* ***************************************************** */
211 |
--------------------------------------------------------------------------------
/Chapter 2/ch02_pitfalls_of_set_operations.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch02_pitfalls_of_set_operations.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 2
10 | Pitfalls of Set Operations
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | -- Unlike most other chapters, this chapter manually formats
20 | -- columns instead of using sqlformat ansiconsole
21 |
22 | set pagesize 80
23 | set linesize 80
24 | set sqlformat
25 | alter session set nls_date_format = 'YYYY-MM-DD';
26 |
27 | column c_id format 99999
28 | column customer_name format a15
29 | column b_id format 99999
30 | column brewery_name format a18
31 | column p_id format 9999
32 | column product_name format a17
33 | column c_or_b_id format 99999
34 | column c_or_b_name format a18
35 | column ordered format a10
36 | column qty format 999
37 | column product_coll format a40
38 | column multiset_coll format a60
39 | column rn format 9
40 |
41 | /* -----------------------------------------------------
42 | Chapter 2 example code
43 | ----------------------------------------------------- */
44 |
45 | -- Listing 2-2. Data for two customers and their orders
46 |
47 | select
48 | customer_id as c_id, customer_name, ordered
49 | , product_id as p_id, product_name , qty
50 | from customer_order_products
51 | where customer_id in (50042, 50741)
52 | order by customer_id, product_id;
53 |
54 | -- Listing 2-3. Data for two breweries and the products bought from them
55 |
56 | select
57 | brewery_id as b_id, brewery_name
58 | , product_id as p_id, product_name
59 | from brewery_products
60 | where brewery_id in (518, 523)
61 | order by brewery_id, product_id;
62 |
63 | -- Listing 2-4. Concatenating the results of two queries
64 |
65 | select product_id as p_id, product_name
66 | from customer_order_products
67 | where customer_id = 50741
68 | union all  -- plain concatenation: duplicates are kept
69 | select product_id as p_id, product_name
70 | from brewery_products
71 | where brewery_id = 523;
72 |
73 | -- Listing 2-5. Different columns from the two queries
74 |
75 | select
76 | customer_id as c_or_b_id, customer_name as c_or_b_name  -- first branch decides the result column names
77 | , product_id as p_id, product_name
78 | from customer_order_products
79 | where customer_id = 50741
80 | union all
81 | select
82 | brewery_id, brewery_name
83 | , product_id as p_id, product_name
84 | from brewery_products
85 | where brewery_id = 523;
86 |
87 | -- Attempting to order by a table column leads to ORA-00904: "PRODUCT_ID": invalid identifier
88 |
89 | select
90 | customer_id as c_or_b_id, customer_name as c_or_b_name
91 | , product_id as p_id, product_name
92 | from customer_order_products
93 | where customer_id = 50741
94 | union all
95 | select
96 | brewery_id, brewery_name
97 | , product_id as p_id, product_name
98 | from brewery_products
99 | where brewery_id = 523
100 | order by product_id;  -- deliberate error: only the alias p_id is visible here
101 |
102 | -- Ordering by column alias works
103 |
104 | select
105 | customer_id as c_or_b_id, customer_name as c_or_b_name
106 | , product_id as p_id, product_name
107 | from customer_order_products
108 | where customer_id = 50741
109 | union all
110 | select
111 | brewery_id, brewery_name
112 | , product_id as p_id, product_name
113 | from brewery_products
114 | where brewery_id = 523
115 | order by p_id;
116 |
117 | -- Listing 2-6. Union is a true set operation that implicitly performs a distinct of the query result
118 |
119 | select product_id as p_id, product_name
120 | from customer_order_products
121 | where customer_id = 50741
122 | union  -- implicit distinct, unlike union all
123 | select product_id as p_id, product_name
124 | from brewery_products
125 | where brewery_id = 523
126 | order by p_id;
127 |
128 | -- Where union is the distinct joined results, intersect is the distinct common results
129 |
130 | select product_id as p_id, product_name
131 | from customer_order_products
132 | where customer_id = 50741
133 | intersect  -- distinct rows present in both branches
134 | select product_id as p_id, product_name
135 | from brewery_products
136 | where brewery_id = 523
137 | order by p_id;
138 |
139 | -- Minus is the set subtraction - also known as except
140 |
141 | select product_id as p_id, product_name
142 | from customer_order_products
143 | where customer_id = 50741
144 | minus  -- distinct rows of the first branch not present in the second
145 | select product_id as p_id, product_name
146 | from brewery_products
147 | where brewery_id = 523
148 | order by p_id;
149 |
150 | -- Listing 2-7. The customer product data viewed as a collection type
151 |
152 | select
153 | customer_id as c_id, customer_name
154 | , product_coll
155 | from customer_order_products_obj
156 | where customer_id in (50042, 50741)
157 | order by customer_id;
158 |
159 | -- Listing 2-8. Doing union as a multiset operation on the collections
160 |
161 | select
162 | whitehart.product_coll
163 | multiset union  -- multiset union defaults to ALL semantics, not distinct
164 | hyggehumle.product_coll
165 | as multiset_coll
166 | from customer_order_products_obj whitehart
167 | cross join customer_order_products_obj hyggehumle  -- one row each side, so a single combined row
168 | where whitehart.customer_id = 50042
169 | and hyggehumle.customer_id = 50741;
170 |
171 | -- Multiset union all is the same as multiset union
172 |
173 | select
174 | whitehart.product_coll
175 | multiset union all
176 | hyggehumle.product_coll
177 | as multiset_coll
178 | from customer_order_products_obj whitehart
179 | cross join customer_order_products_obj hyggehumle
180 | where whitehart.customer_id = 50042
181 | and hyggehumle.customer_id = 50741;
182 |
183 | -- Multiset union distinct exists too
184 |
185 | select
186 | whitehart.product_coll
187 | multiset union distinct
188 | hyggehumle.product_coll
189 | as multiset_coll
190 | from customer_order_products_obj whitehart
191 | cross join customer_order_products_obj hyggehumle
192 | where whitehart.customer_id = 50042
193 | and hyggehumle.customer_id = 50741;
194 |
195 | -- For multiset an intersect all is possible
196 |
197 | select
198 | whitehart.product_coll
199 | multiset intersect all  -- no equivalent exists for the row-based intersect operator
200 | hyggehumle.product_coll
201 | as multiset_coll
202 | from customer_order_products_obj whitehart
203 | cross join customer_order_products_obj hyggehumle
204 | where whitehart.customer_id = 50042
205 | and hyggehumle.customer_id = 50741;
206 |
207 | -- As well as an intersect distinct
208 |
209 | select
210 | whitehart.product_coll
211 | multiset intersect distinct
212 | hyggehumle.product_coll
213 | as multiset_coll
214 | from customer_order_products_obj whitehart
215 | cross join customer_order_products_obj hyggehumle
216 | where whitehart.customer_id = 50042
217 | and hyggehumle.customer_id = 50741;
218 |
219 | -- Naturally there is an except all as well
220 |
221 | select
222 | whitehart.product_coll
223 | multiset except all
224 | hyggehumle.product_coll
225 | as multiset_coll
226 | from customer_order_products_obj whitehart
227 | cross join customer_order_products_obj hyggehumle
228 | where whitehart.customer_id = 50042
229 | and hyggehumle.customer_id = 50741;
230 |
231 | -- More interesting results of the reversed except all
232 |
233 | select
234 | hyggehumle.product_coll  -- operand order matters for except
235 | multiset except all
236 | whitehart.product_coll
237 | as multiset_coll
238 | from customer_order_products_obj whitehart
239 | cross join customer_order_products_obj hyggehumle
240 | where whitehart.customer_id = 50042
241 | and hyggehumle.customer_id = 50741;
242 |
243 | -- Except distinct result shows distinct is performed before set subtraction
244 |
245 | select
246 | hyggehumle.product_coll
247 | multiset except distinct
248 | whitehart.product_coll
249 | as multiset_coll
250 | from customer_order_products_obj whitehart
251 | cross join customer_order_products_obj hyggehumle
252 | where whitehart.customer_id = 50042
253 | and hyggehumle.customer_id = 50741;
254 |
255 | -- Listing 2-9. Minus is like multiset except distinct
256 |
257 | select product_id as p_id, product_name
258 | from customer_order_products
259 | where customer_id = 50741
260 | minus
261 | select product_id as p_id, product_name
262 | from customer_order_products
263 | where customer_id = 50042
264 | order by p_id;
265 |
266 | -- Listing 2-10. Emulating minus all using multiset except all
267 |
268 | select
269 | minus_all_table.id as p_id
270 | , minus_all_table.name as product_name
271 | from table(  -- unnest the resulting collection back into rows
272 | cast(
273 | multiset(  -- turn each query result into a collection
274 | select product_id, product_name
275 | from customer_order_products
276 | where customer_id = 50741
277 | )
278 | as id_name_coll_type
279 | )
280 | multiset except all  -- collection-level subtraction keeps duplicates
281 | cast(
282 | multiset(
283 | select product_id, product_name
284 | from customer_order_products
285 | where customer_id = 50042
286 | )
287 | as id_name_coll_type
288 | )
289 | ) minus_all_table
290 | order by p_id;
291 |
292 | -- Listing 2-11. Emulating minus all using analytic row_number function
293 |
294 | select
295 | product_id as p_id
296 | , product_name
297 | , row_number() over (
298 | partition by product_id, product_name
299 | order by rownum  -- arbitrary order; just numbers duplicate rows 1, 2, ...
300 | ) as rn
301 | from customer_order_products
302 | where customer_id = 50741
303 | minus  -- rn makes each duplicate row distinct, so minus keeps the surplus copies
304 | select
305 | product_id as p_id
306 | , product_name
307 | , row_number() over (
308 | partition by product_id, product_name
309 | order by rownum
310 | ) as rn
311 | from customer_order_products
312 | where customer_id = 50042
313 | order by p_id;
314 |
315 | /* ***************************************************** */
316 |
--------------------------------------------------------------------------------
/Chapter 20/ch20_finding_abnormal_peaks.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch20_finding_abnormal_peaks.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 20
10 | Finding Abnormal Peaks
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | alter session set nls_date_format = 'YYYY-MM-DD';
24 |
25 | /* -----------------------------------------------------
26 | Chapter 20 example code
27 | ----------------------------------------------------- */
28 |
29 | -- Listing 20-2. The pages in my webshop app
30 |
31 | select
32 | p.app_id
33 | , a.name as app_name
34 | , p.page_no
35 | , p.friendly_url
36 | from web_apps a
37 | join web_pages p
38 | on p.app_id = a.id
39 | order by p.app_id, p.page_no;
40 |
41 | -- Listing 20-3. Web page counter history data
42 |
43 | select
44 | friendly_url, day, counter
45 | from web_page_counter_hist
46 | where app_id = 542
47 | order by page_no, day;
48 |
49 | -- Listing 20-4. Recognizing days where counter grew by at least 200
50 |
51 | select
52 | url, from_day, to_day, days, begin, growth, daily
53 | from web_page_counter_hist
54 | match_recognize(
55 | partition by page_no
56 | order by day
57 | measures
58 | first(friendly_url) as url
59 | , first(day) as from_day
60 | , last(day) as to_day
61 | , count(*) as days
62 | , first(counter) as begin
63 | , next(counter) - first(counter) as growth  -- implicit final semantics; made explicit in the next query
64 | , (next(counter) - first(counter)) / count(*)
65 | as daily
66 | one row per match
67 | after match skip past last row  -- the default skip behavior, stated for clarity
68 | pattern ( peak+ )
69 | define
70 | peak as next(counter) - counter >= 200  -- a peak day: tomorrow's counter at least 200 above today's
71 | )
72 | order by page_no, from_day;
73 |
74 | -- Explicitly using final and last
75 |
76 | select
77 | url, from_day, to_day, days, begin, growth, daily
78 | from web_page_counter_hist
79 | match_recognize(
80 | partition by page_no
81 | order by day
82 | measures
83 | first(friendly_url) as url
84 | , first(day) as from_day
85 | , final last(day) as to_day  -- explicit final semantics
86 | , final count(*) as days
87 | , first(counter) as begin
88 | , next(final last(counter)) - first(counter) as growth  -- counter of the day after the match
89 | , (next(final last(counter)) - first(counter))
90 | / final count(*) as daily
91 | one row per match
92 | after match skip past last row
93 | pattern ( peak+ )
94 | define
95 | peak as next(counter) - counter >= 200
96 | )
97 | order by page_no, from_day;
98 |
99 | -- Listing 20-5. Recognizing days where counter grew by at least 4 percent
100 |
101 | select
102 | url, from_day, to_day, days, begin, pct, daily
103 | from web_page_counter_hist
104 | match_recognize(
105 | partition by page_no
106 | order by day
107 | measures
108 | first(friendly_url) as url
109 | , first(day) as from_day
110 | , final last(day) as to_day
111 | , final count(*) as days
112 | , first(counter) as begin
113 | , round(
114 | 100 * (next(final last(counter)) / first(counter))
115 | - 100
116 | , 1
117 | ) as pct  -- total percentage growth over the match
118 | , round(
119 | (100 * (next(final last(counter)) / first(counter))
120 | - 100) / final count(*)
121 | , 1
122 | ) as daily  -- average percentage growth per day
123 | one row per match
124 | after match skip past last row
125 | pattern ( peak+ )
126 | define
127 | peak as next(counter) / counter >= 1.04  -- at least 4% growth on that day
128 | )
129 | order by page_no, from_day;
130 |
131 | -- Periods where counter grew by at least 4% on average per day
132 |
133 | select
134 | url, from_day, to_day, days, begin, pct, daily
135 | from web_page_counter_hist
136 | match_recognize(
137 | partition by page_no
138 | order by day
139 | measures
140 | first(friendly_url) as url
141 | , first(day) as from_day
142 | , final last(day) as to_day
143 | , final count(*) as days
144 | , first(counter) as begin
145 | , round(
146 | 100 * (next(final last(counter)) / first(counter))
147 | - 100
148 | , 1
149 | ) as pct
150 | , round(
151 | (100 * (next(final last(counter)) / first(counter))
152 | - 100) / final count(*)
153 | , 1
154 | ) as daily
155 | one row per match
156 | after match skip past last row
157 | pattern ( peak+ )
158 | define
159 | peak as ((next(counter) / first(counter)) - 1)
160 | / running count(*) >= 0.04  -- average growth over the match so far (running semantics)
161 | )
162 | order by page_no, from_day;
163 |
164 | -- Listing 20-6. Focusing on daily visits
165 |
166 | select
167 | friendly_url, day
168 | , lead(counter) over (
169 | partition by page_no order by day
170 | ) - counter as visits  -- daily visits = next day's counter minus today's
171 | from web_page_counter_hist
172 | order by page_no, day;
173 |
174 | -- Listing 20-7. Daily visits at least 50 higher than previous day
175 |
176 | select
177 | url, from_day, to_day, days, begin, p_v, f_v, t_v, d_v
178 | from web_page_counter_hist
179 | match_recognize(
180 | partition by page_no
181 | order by day
182 | measures
183 | first(friendly_url) as url
184 | , first(day) as from_day
185 | , final last(day) as to_day
186 | , final count(*) as days
187 | , first(counter) as begin
188 | , first(counter) - prev(first(counter)) as p_v  -- visits on the day just before the match
189 | , next(first(counter)) - first(counter) as f_v  -- visits on the first match day
190 | , next(final last(counter)) - first(counter) as t_v  -- total visits over the match
191 | , round(
192 | (next(final last(counter)) - first(counter))
193 | / final count(*)
194 | , 1
195 | ) as d_v  -- average visits per day in the match
196 | one row per match
197 | after match skip past last row
198 | pattern ( peak+ )
199 | define
200 | peak as next(counter) - counter
201 | - (first(counter) - prev(first(counter))) >= 50  -- each day beats the pre-match day by >= 50 visits
202 | )
203 | order by page_no, from_day;
204 |
205 | -- Listing 20-8. Pre-calculating visits for simplifying code
206 |
207 | select
208 | url, from_day, to_day, days, begin, p_v, f_v, t_v, d_v
209 | from (
210 | select
211 | page_no, friendly_url, day, counter
212 | , lead(counter) over (
213 | partition by page_no order by day
214 | ) - counter as visits  -- pre-calculated in an inline view to simplify the measures
215 | from web_page_counter_hist
216 | )
217 | match_recognize(
218 | partition by page_no
219 | order by day
220 | measures
221 | first(friendly_url) as url
222 | , first(day) as from_day
223 | , final last(day) as to_day
224 | , final count(*) as days
225 | , first(counter) as begin
226 | , prev(first(visits)) as p_v
227 | , first(visits) as f_v
228 | , final sum(visits) as t_v
229 | , round(final avg(visits)) as d_v
230 | one row per match
231 | after match skip past last row
232 | pattern ( peak+ )
233 | define
234 | peak as visits - prev(first(visits)) >= 50
235 | )
236 | order by page_no, from_day;
237 |
238 | -- Listing 20-9. Daily visits at least 50% higher than previous day
239 |
240 | select
241 | url, from_day, to_day, days, begin, p_v, f_v, t_v, d_pct
242 | from (
243 | select
244 | page_no, friendly_url, day, counter
245 | , lead(counter) over (
246 | partition by page_no order by day
247 | ) - counter as visits
248 | from web_page_counter_hist
249 | )
250 | match_recognize(
251 | partition by page_no
252 | order by day
253 | measures
254 | first(friendly_url) as url
255 | , first(day) as from_day
256 | , final last(day) as to_day
257 | , final count(*) as days
258 | , first(counter) as begin
259 | , prev(first(visits)) as p_v
260 | , first(visits) as f_v
261 | , final sum(visits) as t_v
262 | , round(
263 | (100*(final sum(visits) / prev(first(visits))) - 100)
264 | / final count(*)
265 | , 1
266 | ) as d_pct
267 | one row per match
268 | after match skip past last row
269 | pattern ( peak+ )
270 | define
271 | peak as visits / nullif(prev(first(visits)), 0) >= 1.5  -- nullif guards the divide-by-zero case
272 | )
273 | order by page_no, from_day;
274 |
275 | -- Listing 20-10. Daily visits at least 50% higher than average
276 |
277 | select
278 | url, avg_v, from_day, to_day, days, t_v, d_v, d_pct
279 | from (
280 | select
281 | page_no, friendly_url, day, counter, visits
282 | , avg(visits) over (
283 | partition by page_no
284 | ) as avg_visits  -- page-wide average, same value on every row of the page
285 | from (
286 | select
287 | page_no, friendly_url, day, counter
288 | , lead(counter) over (
289 | partition by page_no order by day
290 | ) - counter as visits
291 | from web_page_counter_hist
292 | )
293 | )
294 | match_recognize(
295 | partition by page_no
296 | order by day
297 | measures
298 | first(friendly_url) as url
299 | , round(first(avg_visits), 1) as avg_v
300 | , first(day) as from_day
301 | , final last(day) as to_day
302 | , final count(*) as days
303 | , final sum(visits) as t_v
304 | , round(final avg(visits), 1) as d_v
305 | , round(
306 | (100 * final avg(visits) / avg_visits) - 100
307 | , 1
308 | ) as d_pct
309 | one row per match
310 | after match skip past last row
311 | pattern ( peak+ )
312 | define
313 | peak as visits / avg_visits >= 1.5  -- day's visits at least 150% of the page average
314 | )
315 | order by page_no, from_day;
316 |
317 | -- Daily visits at least 80% less than average
318 |
319 | select
320 | url, avg_v, from_day, to_day, days, t_v, d_v, d_pct
321 | from (
322 | select
323 | page_no, friendly_url, day, counter, visits
324 | , avg(visits) over (
325 | partition by page_no
326 | ) as avg_visits
327 | from (
328 | select
329 | page_no, friendly_url, day, counter
330 | , lead(counter) over (
331 | partition by page_no order by day
332 | ) - counter as visits
333 | from web_page_counter_hist
334 | )
335 | )
336 | match_recognize(
337 | partition by page_no
338 | order by day
339 | measures
340 | first(friendly_url) as url
341 | , round(first(avg_visits), 1) as avg_v
342 | , first(day) as from_day
343 | , final last(day) as to_day
344 | , final count(*) as days
345 | , final sum(visits) as t_v
346 | , round(final avg(visits), 1) as d_v
347 | , round(
348 | (100 * final avg(visits) / avg_visits) - 100
349 | , 1
350 | ) as d_pct
351 | one row per match
352 | after match skip past last row
353 | pattern ( peak+ )
354 | define
355 | peak as visits / avg_visits <= 0.2  -- day's visits at most 20% of the page average (an abnormal dip)
356 | )
357 | order by page_no, from_day;
358 |
359 | -- Listing 20-11. Finding multiple peak classifications simultaneously
360 |
361 | select
362 | url, avg_v, from_day, days, class, t_v, d_v, d_pct
363 | from (
364 | select
365 | page_no, friendly_url, day, counter, visits
366 | , avg(visits) over (
367 | partition by page_no
368 | ) as avg_visits
369 | from (
370 | select
371 | page_no, friendly_url, day, counter
372 | , lead(counter) over (
373 | partition by page_no order by day
374 | ) - counter as visits
375 | from web_page_counter_hist
376 | )
377 | )
378 | match_recognize(
379 | partition by page_no
380 | order by day
381 | measures
382 | first(friendly_url) as url
383 | , round(first(avg_visits), 1) as avg_v
384 | , first(day) as from_day
385 | , final count(*) as days
386 | , classifier() as class
387 | , final sum(visits) as t_v
388 | , round(final avg(visits), 1) as d_v
389 | , round(
390 | (100 * final avg(visits) / avg_visits) - 100
391 | , 1
392 | ) as d_pct
393 | one row per match
394 | after match skip past last row
395 | pattern ( high{1,} | medium{2,} | low{3,} )
396 | define
397 | high as visits / avg_visits >= 4
398 | , medium as visits / avg_visits >= 2
399 | , low as visits / avg_visits >= 1.1
400 | )
401 | order by page_no, from_day;
402 |
403 | -- Listing 20-12. Finding peaks of a particular shape
404 |
405 | select
406 | url, avg_v, from_day, days, hi, med, low, t_v, d_v, d_pct
407 | from (
408 | select
409 | page_no, friendly_url, day, counter, visits
410 | , avg(visits) over (
411 | partition by page_no
412 | ) as avg_visits
413 | from (
414 | select
415 | page_no, friendly_url, day, counter
416 | , lead(counter) over (
417 | partition by page_no order by day
418 | ) - counter as visits
419 | from web_page_counter_hist
420 | )
421 | )
422 | match_recognize(
423 | partition by page_no
424 | order by day
425 | measures
426 | first(friendly_url) as url
427 | , round(first(avg_visits), 1) as avg_v
428 | , first(day) as from_day
429 | , final count(*) as days
430 | , final count(high.*) as hi
431 | , final count(medium.*) as med
432 | , final count(low.*) as low
433 | , final sum(visits) as t_v
434 | , round(final avg(visits), 1) as d_v
435 | , round(
436 | (100 * final avg(visits) / avg_visits) - 100
437 | , 1
438 | ) as d_pct
439 | one row per match
440 | after match skip past last row
441 | pattern ( high+ medium+ low+ )
442 | define
443 | high as visits / avg_visits >= 2.5
444 | , medium as visits / avg_visits >= 1.5
445 | , low as visits / avg_visits >= 1.1
446 | )
447 | order by page_no, from_day;
448 |
449 | /* ***************************************************** */
450 |
--------------------------------------------------------------------------------
/Chapter 21/ch21_bin_fitting.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Apress/practical-oracle-sql/5bbc0d14e9d0c165156e194bb171c2a2d5c5729f/Chapter 21/ch21_bin_fitting.sql
--------------------------------------------------------------------------------
/Chapter 22/ch22_counting_children_in_trees.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Apress/practical-oracle-sql/5bbc0d14e9d0c165156e194bb171c2a2d5c5729f/Chapter 22/ch22_counting_children_in_trees.sql
--------------------------------------------------------------------------------
/Chapter 3/ch03_divide_and_conquer_with_subquery_factoring.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Apress/practical-oracle-sql/5bbc0d14e9d0c165156e194bb171c2a2d5c5729f/Chapter 3/ch03_divide_and_conquer_with_subquery_factoring.sql
--------------------------------------------------------------------------------
/Chapter 4/ch04_tree_calculations_with_recursion.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch04_tree_calculations_with_recursion.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 4
10 | Tree Calculations with Recursion
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | /* -----------------------------------------------------
24 | Chapter 4 example code
25 | ----------------------------------------------------- */
26 |
27 | -- Listing 4-1. The hierarchical relations of the different packaging types
28 |
29 | select
30 | p.id as p_id
31 | , lpad(' ', 2*(level-1)) || p.name as p_name
32 | , c.id as c_id
33 | , c.name as c_name
34 | , pr.qty
35 | from packaging_relations pr
36 | join packaging p
37 | on p.id = pr.packaging_id
38 | join packaging c
39 | on c.id = pr.contains_id
40 | start with pr.packaging_id not in (
41 | select c.contains_id from packaging_relations c
42 | )
43 | connect by pr.packaging_id = prior pr.contains_id
44 | order siblings by pr.contains_id;
45 |
46 | -- Listing 4-2. First attempt at multiplication of quantities
47 |
48 | select
49 | connect_by_root p.id as p_id
50 | , connect_by_root p.name as p_name
51 | , c.id as c_id
52 | , c.name as c_name
53 | , ltrim(sys_connect_by_path(pr.qty, '*'), '*') as qty_expr
54 | , qty * prior qty as qty_mult
55 | from packaging_relations pr
56 | join packaging p
57 | on p.id = pr.packaging_id
58 | join packaging c
59 | on c.id = pr.contains_id
60 | where connect_by_isleaf = 1
61 | start with pr.packaging_id not in (
62 | select c.contains_id from packaging_relations c
63 | )
64 | connect by pr.packaging_id = prior pr.contains_id
65 | order siblings by pr.contains_id;
66 |
67 | -- Listing 4-3. Multiplication of quantities with recursive subquery factoring
68 |
69 | with recursive_pr (
70 | packaging_id, contains_id, qty, lvl
71 | ) as (
72 | select
73 | pr.packaging_id
74 | , pr.contains_id
75 | , pr.qty
76 | , 1 as lvl
77 | from packaging_relations pr
78 | where pr.packaging_id not in (
79 | select c.contains_id from packaging_relations c
80 | )
81 | union all
82 | select
83 | pr.packaging_id
84 | , pr.contains_id
85 | , rpr.qty * pr.qty as qty
86 | , rpr.lvl + 1 as lvl
87 | from recursive_pr rpr
88 | join packaging_relations pr
89 | on pr.packaging_id = rpr.contains_id
90 | )
91 | search depth first by contains_id set rpr_order
92 | select
93 | p.id as p_id
94 | , lpad(' ', 2*(rpr.lvl-1)) || p.name as p_name
95 | , c.id as c_id
96 | , c.name as c_name
97 | , rpr.qty
98 | from recursive_pr rpr
99 | join packaging p
100 | on p.id = rpr.packaging_id
101 | join packaging c
102 | on c.id = rpr.contains_id
103 | order by rpr.rpr_order;
104 |
105 | -- Listing 4-4. Finding leaves in recursive subquery factoring
106 |
107 | with recursive_pr (
108 | root_id, packaging_id, contains_id, qty, lvl
109 | ) as (
110 | select
111 | pr.packaging_id as root_id
112 | , pr.packaging_id
113 | , pr.contains_id
114 | , pr.qty
115 | , 1 as lvl
116 | from packaging_relations pr
117 | where pr.packaging_id not in (
118 | select c.contains_id from packaging_relations c
119 | )
120 | union all
121 | select
122 | rpr.root_id
123 | , pr.packaging_id
124 | , pr.contains_id
125 | , rpr.qty * pr.qty as qty
126 | , rpr.lvl + 1 as lvl
127 | from recursive_pr rpr
128 | join packaging_relations pr
129 | on pr.packaging_id = rpr.contains_id
130 | )
131 | search depth first by contains_id set rpr_order
132 | select
133 | p.id as p_id
134 | , p.name as p_name
135 | , c.id as c_id
136 | , c.name as c_name
137 | , leaf.qty
138 | from (
139 | select
140 | rpr.*
141 | , case
142 | when nvl(
143 | lead(rpr.lvl) over (order by rpr.rpr_order)
144 | , 0
145 | ) > rpr.lvl
146 | then 0
147 | else 1
148 | end as is_leaf
149 | from recursive_pr rpr
150 | ) leaf
151 | join packaging p
152 | on p.id = leaf.root_id
153 | join packaging c
154 | on c.id = leaf.contains_id
155 | where leaf.is_leaf = 1
156 | order by leaf.rpr_order;
157 |
158 | -- Listing 4-5. Grouping totals for packaging combinations
159 |
160 | with recursive_pr (
161 | root_id, packaging_id, contains_id, qty, lvl
162 | ) as (
163 | select
164 | pr.packaging_id as root_id
165 | , pr.packaging_id
166 | , pr.contains_id
167 | , pr.qty
168 | , 1 as lvl
169 | from packaging_relations pr
170 | where pr.packaging_id not in (
171 | select c.contains_id from packaging_relations c
172 | )
173 | union all
174 | select
175 | rpr.root_id
176 | , pr.packaging_id
177 | , pr.contains_id
178 | , rpr.qty * pr.qty as qty
179 | , rpr.lvl + 1 as lvl
180 | from recursive_pr rpr
181 | join packaging_relations pr
182 | on pr.packaging_id = rpr.contains_id
183 | )
184 | search depth first by contains_id set rpr_order
185 | select
186 | p.id as p_id
187 | , p.name as p_name
188 | , c.id as c_id
189 | , c.name as c_name
190 | , leaf.qty
191 | from (
192 | select
193 | root_id, contains_id, sum(qty) as qty
194 | from (
195 | select
196 | rpr.*
197 | , case
198 | when nvl(
199 | lead(rpr.lvl) over (order by rpr.rpr_order)
200 | , 0
201 | ) > rpr.lvl
202 | then 0
203 | else 1
204 | end as is_leaf
205 | from recursive_pr rpr
206 | )
207 | where is_leaf = 1
208 | group by root_id, contains_id
209 | ) leaf
210 | join packaging p
211 | on p.id = leaf.root_id
212 | join packaging c
213 | on c.id = leaf.contains_id
214 | order by p.id, c.id;
215 |
216 | -- Listing 4-6. Alternative method using dynamic evaluation function
217 |
218 | with
219 | function evaluate_expr(
220 | p_expr varchar2
221 | )
222 | return number
223 | is
224 | l_retval number;
225 | begin
226 | execute immediate
227 | 'select ' || p_expr || ' from dual'
228 | into l_retval;
229 | return l_retval;
230 | end;
231 | select
232 | connect_by_root p.id as p_id
233 | , connect_by_root p.name as p_name
234 | , c.id as c_id
235 | , c.name as c_name
236 | , ltrim(sys_connect_by_path(pr.qty, '*'), '*') as qty_expr
237 | , evaluate_expr(
238 | ltrim(sys_connect_by_path(pr.qty, '*'), '*')
239 | ) as qty_mult
240 | from packaging_relations pr
241 | join packaging p
242 | on p.id = pr.packaging_id
243 | join packaging c
244 | on c.id = pr.contains_id
245 | where connect_by_isleaf = 1
246 | start with pr.packaging_id not in (
247 | select c.contains_id from packaging_relations c
248 | )
249 | connect by pr.packaging_id = prior pr.contains_id
250 | order siblings by pr.contains_id;
251 | /
252 |
253 | /* ***************************************************** */
254 |
--------------------------------------------------------------------------------
/Chapter 5/ch05_functions_defined_within_sql.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch05_functions_defined_within_sql.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 5
10 | Functions Defined Within SQL
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 |
23 | /* -----------------------------------------------------
24 |    Chapter 5 example code
25 | ----------------------------------------------------- */
26 |
27 | -- Listing 5-1. The alcohol data for the beers in the Stout product group
28 |
29 | select
30 | p.id as p_id
31 | , p.name
32 | , pa.sales_volume as vol
33 | , pa.abv
34 | from products p
35 | join product_alcohol pa
36 | on pa.product_id = p.id
37 | where p.group_id = 142
38 | order by p.id;
39 |
40 | -- Listing 5-2. Calculating blood alcohol concentration for male and female
41 |
42 | select
43 | p.id as p_id
44 | , p.name
45 | , pa.sales_volume as vol
46 | , pa.abv
47 | , round(
48 | 100 * (pa.sales_volume * pa.abv / 100 * 0.789)
49 | / (80 * 1000 * 0.68)
50 | , 3
51 | ) bac_m
52 | , round(
53 | 100 * (pa.sales_volume * pa.abv / 100 * 0.789)
54 | / (60 * 1000 * 0.55)
55 | , 3
56 | ) bac_f
57 | from products p
58 | join product_alcohol pa
59 | on pa.product_id = p.id
60 | where p.group_id = 142
61 | order by p.id;
62 |
63 | -- Listing 5-4. Querying male and female BAC using packaged formula
64 |
65 | select
66 | p.id as p_id
67 | , p.name
68 | , pa.sales_volume as vol
69 | , pa.abv
70 | , formulas.bac(pa.sales_volume, pa.abv, 80, 'M') bac_m
71 | , formulas.bac(pa.sales_volume, pa.abv, 60, 'F') bac_f
72 | from products p
73 | join product_alcohol pa
74 | on pa.product_id = p.id
75 | where p.group_id = 142
76 | order by p.id;
77 |
78 | -- Listing 5-5. Querying BAC with a function in the with clause
79 |
80 | with
81 | function bac (
82 | p_volume in number
83 | , p_abv in number
84 | , p_weight in number
85 | , p_gender in varchar2
86 | ) return number deterministic
87 | is
88 | begin
89 | return round(
90 | 100 * (p_volume * p_abv / 100 * 0.789)
91 | / (p_weight * 1000 * case p_gender
92 | when 'M' then 0.68
93 | when 'F' then 0.55
94 | end)
95 | , 3
96 | );
97 | end;
98 | select
99 | p.id as p_id
100 | , p.name
101 | , pa.sales_volume as vol
102 | , pa.abv
103 | , bac(pa.sales_volume, pa.abv, 80, 'M') bac_m
104 | , bac(pa.sales_volume, pa.abv, 60, 'F') bac_f
105 | from products p
106 | join product_alcohol pa
107 | on pa.product_id = p.id
108 | where p.group_id = 142
109 | order by p.id
110 | /
111 |
112 | -- Listing 5-6. Having multiple functions in one with clause
113 |
114 | with
115 | function gram_alcohol (
116 | p_volume in number
117 | , p_abv in number
118 | ) return number deterministic
119 | is
120 | begin
121 | return p_volume * p_abv / 100 * 0.789;
122 | end;
123 | function gram_body_fluid (
124 | p_weight in number
125 | , p_gender in varchar2
126 | ) return number deterministic
127 | is
128 | begin
129 | return p_weight * 1000 * case p_gender
130 | when 'M' then 0.68
131 | when 'F' then 0.55
132 | end;
133 | end;
134 | function bac (
135 | p_volume in number
136 | , p_abv in number
137 | , p_weight in number
138 | , p_gender in varchar2
139 | ) return number deterministic
140 | is
141 | begin
142 | return round(
143 | 100 * gram_alcohol(p_volume, p_abv)
144 | / gram_body_fluid(p_weight, p_gender)
145 | , 3
146 | );
147 | end;
148 | select
149 | p.id as p_id
150 | , p.name
151 | , pa.sales_volume as vol
152 | , pa.abv
153 | , bac(pa.sales_volume, pa.abv, 80, 'M') bac_m
154 | , bac(pa.sales_volume, pa.abv, 60, 'F') bac_f
155 | from products p
156 | join product_alcohol pa
157 | on pa.product_id = p.id
158 | where p.group_id = 142
159 | order by p.id
160 | /
161 |
162 | -- Listing 5-8. Querying BAC data using the view
163 |
164 | select
165 | p.id as p_id
166 | , p.name
167 | , pab.sales_volume as vol
168 | , pab.abv
169 | , pab.bac_m
170 | , pab.bac_f
171 | from products p
172 | join product_alcohol_bac pab
173 | on pab.product_id = p.id
174 | where p.group_id = 142
175 | order by p.id;
176 |
177 | /* ***************************************************** */
178 |
--------------------------------------------------------------------------------
/Chapter 6/ch06_iterative_calculations_with_multidimensional_data.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch06_iterative_calculations_with_multidimensional_data.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 6
10 | Iterative Calculations with Multidimensional Data
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | -- Unlike most other chapters, this chapter manually formats
20 | -- columns instead of using sqlformat ansiconsole
21 |
22 | set pagesize 80
23 | set linesize 80
24 | set sqlformat
25 | column generation format 99
26 | column x format 99
27 | column y format 99
28 | column alive format 9999
29 | column cells format a10
30 | column sum_alives format a10
31 | column nb_alives format a10
32 |
33 | /* -----------------------------------------------------
34 | Chapter 6 example code
35 | ----------------------------------------------------- */
36 |
37 | -- Listing 6-1. Creating a 10x10 generation zero population
38 |
39 | truncate table conway_gen_zero;
40 |
41 | insert into conway_gen_zero (x, y, alive)
42 | select * from (
43 | with numbers as (
44 | select level as n from dual
45 | connect by level <= 10
46 | ), grid as (
47 | select
48 | x.n as x
49 | , y.n as y
50 | from numbers x
51 | cross join numbers y
52 | ), start_cells as (
53 | select 4 x, 4 y from dual union all
54 | select 5 x, 4 y from dual union all
55 | select 4 x, 5 y from dual union all
56 | select 6 x, 6 y from dual union all
57 | select 7 x, 6 y from dual union all
58 | select 4 x, 7 y from dual union all
59 | select 5 x, 7 y from dual union all
60 | select 6 x, 7 y from dual
61 | )
62 | select
63 | g.x
64 | , g.y
65 | , nvl2(sc.x, 1, 0) as alive
66 | from grid g
67 | left outer join start_cells sc
68 | on sc.x = g.x
69 | and sc.y = g.y
70 | );
71 |
72 | commit;
73 |
74 | -- Listing 6-2. Visualizing generation zero
75 |
76 | select
77 | listagg(
78 | case alive
79 | when 1 then 'X'
80 | when 0 then ' '
81 | end
82 | ) within group (
83 | order by x
84 | ) as cells
85 | from conway_gen_zero
86 | group by y
87 | order by y;
88 |
89 | -- Listing 6-3. Live neighbour calculation with model clause
90 |
91 | select *
92 | from conway_gen_zero
93 | model
94 | dimension by (
95 | x, y
96 | )
97 | measures (
98 | alive
99 | , 0 as sum_alive
100 | , 0 as nb_alive
101 | )
102 | ignore nav
103 | rules
104 | (
105 | sum_alive[any, any] =
106 | sum(alive)[
107 | x between cv() - 1 and cv() + 1
108 | , y between cv() - 1 and cv() + 1
109 | ]
110 | , nb_alive[any, any] =
111 | sum_alive[cv(), cv()] - alive[cv(), cv()]
112 | )
113 | order by x, y;
114 |
115 | -- Listing 6-4. Live neighbour calculation with scalar subquery
116 |
117 | select
118 | x
119 | , y
120 | , alive
121 | , sum_alive
122 | , sum_alive - alive as nb_alive
123 | from (
124 | select
125 | x
126 | , y
127 | , alive
128 | , (
129 | select sum(gz2.alive)
130 | from conway_gen_zero gz2
131 | where gz2.x between gz.x - 1 and gz.x + 1
132 | and gz2.y between gz.y - 1 and gz.y + 1
133 | ) as sum_alive
134 | from conway_gen_zero gz
135 | )
136 | order by x, y;
137 |
138 | -- Listing 6-5. Displaying the counts grid fashion
139 |
140 | with conway as (
141 | select *
142 | from conway_gen_zero
143 | model
144 | dimension by (
145 | x, y
146 | )
147 | measures (
148 | alive
149 | , 0 as sum_alive
150 | , 0 as nb_alive
151 | )
152 | ignore nav
153 | rules
154 | (
155 | sum_alive[any, any] =
156 | sum(alive)[
157 | x between cv() - 1 and cv() + 1
158 | , y between cv() - 1 and cv() + 1
159 | ]
160 | , nb_alive[any, any] =
161 | sum_alive[cv(), cv()] - alive[cv(), cv()]
162 | )
163 | )
164 | select
165 | listagg(
166 | case alive
167 | when 1 then 'X'
168 | when 0 then ' '
169 | end
170 | ) within group (
171 | order by x
172 | ) cells
173 | , listagg(sum_alive) within group (order by x) sum_alives
174 | , listagg(nb_alive ) within group (order by x) nb_alives
175 | from conway
176 | group by y
177 | order by y;
178 |
179 | -- Variant of Listing 6-5 based on 6-4 instead of 6-3
180 |
181 | with conway as (
182 | select
183 | x
184 | , y
185 | , alive
186 | , sum_alive
187 | , sum_alive - alive as nb_alive
188 | from (
189 | select
190 | x
191 | , y
192 | , alive
193 | , (
194 | select sum(gz2.alive)
195 | from conway_gen_zero gz2
196 | where gz2.x between gz.x - 1 and gz.x + 1
197 | and gz2.y between gz.y - 1 and gz.y + 1
198 | ) as sum_alive
199 | from conway_gen_zero gz
200 | )
201 | )
202 | select
203 | listagg(
204 | case alive
205 | when 1 then 'X'
206 | when 0 then ' '
207 | end
208 | ) within group (
209 | order by x
210 | ) cells
211 | , listagg(sum_alive) within group (order by x) sum_alives
212 | , listagg(nb_alive ) within group (order by x) nb_alives
213 | from conway
214 | group by y
215 | order by y;
216 |
217 | -- Listing 6-6. Iterating 2 generations
218 |
219 | with conway as (
220 | select *
221 | from conway_gen_zero
222 | model
223 | dimension by (
224 | 0 as generation
225 | , x, y
226 | )
227 | measures (
228 | alive
229 | , 0 as sum_alive
230 | , 0 as nb_alive
231 | )
232 | ignore nav
233 | rules upsert all iterate (2)
234 | (
235 | sum_alive[iteration_number, any, any] =
236 | sum(alive)[
237 | generation = iteration_number
238 | , x between cv() - 1 and cv() + 1
239 | , y between cv() - 1 and cv() + 1
240 | ]
241 | , nb_alive[iteration_number, any, any] =
242 | sum_alive[iteration_number, cv(), cv()]
243 | - alive[iteration_number, cv(), cv()]
244 | , alive[iteration_number + 1, any, any] =
245 | case nb_alive[iteration_number, cv(), cv()]
246 | when 2 then alive[iteration_number, cv(), cv()]
247 | when 3 then 1
248 | else 0
249 | end
250 | )
251 | )
252 | select
253 | generation
254 | , listagg(
255 | case alive
256 | when 1 then 'X'
257 | when 0 then ' '
258 | end
259 | ) within group (
260 | order by x
261 | ) cells
262 | , listagg(sum_alive) within group (order by x) sum_alives
263 | , listagg(nb_alive ) within group (order by x) nb_alives
264 | from conway
265 | group by generation, y
266 | order by generation, y;
267 |
268 | -- Listing 6-7. Reducing the query
269 |
270 | with conway as (
271 | select *
272 | from conway_gen_zero
273 | model
274 | dimension by (
275 | 0 as generation
276 | , x, y
277 | )
278 | measures (
279 | alive
280 | )
281 | ignore nav
282 | rules upsert all iterate (2)
283 | (
284 | alive[iteration_number + 1, any, any] =
285 | case sum(alive)[
286 | generation = iteration_number,
287 | x between cv() - 1 and cv() + 1,
288 | y between cv() - 1 and cv() + 1
289 | ] - alive[iteration_number, cv(), cv()]
290 | when 2 then alive[iteration_number, cv(), cv()]
291 | when 3 then 1
292 | else 0
293 | end
294 | )
295 | )
296 | select
297 | generation
298 | , listagg(
299 | case alive
300 | when 1 then 'X'
301 | when 0 then ' '
302 | end
303 | ) within group (
304 | order by x
305 | ) cells
306 | from conway
307 | group by generation, y
308 | order by generation, y;
309 |
310 | -- 25 generations
311 |
312 | with conway as (
313 | select *
314 | from conway_gen_zero
315 | model
316 | dimension by (
317 | 0 as generation
318 | , x, y
319 | )
320 | measures (
321 | alive
322 | )
323 | ignore nav
324 | rules upsert all iterate (25)
325 | (
326 | alive[iteration_number + 1, any, any] =
327 | case sum(alive)[
328 | generation = iteration_number,
329 | x between cv() - 1 and cv() + 1,
330 | y between cv() - 1 and cv() + 1
331 | ] - alive[iteration_number, cv(), cv()]
332 | when 2 then alive[iteration_number, cv(), cv()]
333 | when 3 then 1
334 | else 0
335 | end
336 | )
337 | )
338 | select
339 | generation
340 | , listagg(
341 | case alive
342 | when 1 then 'X'
343 | when 0 then ' '
344 | end
345 | ) within group (
346 | order by x
347 | ) cells
348 | from conway
349 | group by generation, y
350 | order by generation, y;
351 |
352 | -- 50 generations
353 |
354 | with conway as (
355 | select *
356 | from conway_gen_zero
357 | model
358 | dimension by (
359 | 0 as generation
360 | , x, y
361 | )
362 | measures (
363 | alive
364 | )
365 | ignore nav
366 | rules upsert all iterate (50)
367 | (
368 | alive[iteration_number + 1, any, any] =
369 | case sum(alive)[
370 | generation = iteration_number,
371 | x between cv() - 1 and cv() + 1,
372 | y between cv() - 1 and cv() + 1
373 | ] - alive[iteration_number, cv(), cv()]
374 | when 2 then alive[iteration_number, cv(), cv()]
375 | when 3 then 1
376 | else 0
377 | end
378 | )
379 | )
380 | select
381 | generation
382 | , listagg(
383 | case alive
384 | when 1 then 'X'
385 | when 0 then ' '
386 | end
387 | ) within group (
388 | order by x
389 | ) cells
390 | from conway
391 | group by generation, y
392 | order by generation, y;
393 |
394 | -- Listing 6-8. The Toad
395 |
396 | truncate table conway_gen_zero;
397 |
398 | insert into conway_gen_zero (x, y, alive)
399 | select * from (
400 | with numbers as (
401 | select level as n from dual
402 | connect by level <= 6
403 | ), grid as (
404 | select
405 | x.n as x
406 | , y.n as y
407 | from numbers x
408 | cross join numbers y
409 | ), start_cells as (
410 | select 4 x, 2 y from dual union all
411 | select 2 x, 3 y from dual union all
412 | select 5 x, 3 y from dual union all
413 | select 2 x, 4 y from dual union all
414 | select 5 x, 4 y from dual union all
415 | select 3 x, 5 y from dual
416 | )
417 | select
418 | g.x
419 | , g.y
420 | , nvl2(sc.x, 1, 0) as alive
421 | from grid g
422 | left outer join start_cells sc
423 | on sc.x = g.x
424 | and sc.y = g.y
425 | );
426 |
427 | commit;
428 |
429 | -- 2 generations show generation zero and two are identical
430 |
431 | with conway as (
432 | select *
433 | from conway_gen_zero
434 | model
435 | dimension by (
436 | 0 as generation
437 | , x, y
438 | )
439 | measures (
440 | alive
441 | )
442 | ignore nav
443 | rules upsert all iterate (2)
444 | (
445 | alive[iteration_number + 1, any, any] =
446 | case sum(alive)[
447 | generation = iteration_number,
448 | x between cv() - 1 and cv() + 1,
449 | y between cv() - 1 and cv() + 1
450 | ] - alive[iteration_number, cv(), cv()]
451 | when 2 then alive[iteration_number, cv(), cv()]
452 | when 3 then 1
453 | else 0
454 | end
455 | )
456 | )
457 | select
458 | generation
459 | , listagg(
460 | case alive
461 | when 1 then 'X'
462 | when 0 then ' '
463 | end
464 | ) within group (
465 | order by x
466 | ) cells
467 | from conway
468 | group by generation, y
469 | order by generation, y;
470 |
471 | -- Oscillates between two states no matter how many generations
472 |
473 | with conway as (
474 | select *
475 | from conway_gen_zero
476 | model
477 | dimension by (
478 | 0 as generation
479 | , x, y
480 | )
481 | measures (
482 | alive
483 | )
484 | ignore nav
485 | rules upsert all iterate (20)
486 | (
487 | alive[iteration_number + 1, any, any] =
488 | case sum(alive)[
489 | generation = iteration_number,
490 | x between cv() - 1 and cv() + 1,
491 | y between cv() - 1 and cv() + 1
492 | ] - alive[iteration_number, cv(), cv()]
493 | when 2 then alive[iteration_number, cv(), cv()]
494 | when 3 then 1
495 | else 0
496 | end
497 | )
498 | )
499 | select
500 | generation
501 | , listagg(
502 | case alive
503 | when 1 then 'X'
504 | when 0 then ' '
505 | end
506 | ) within group (
507 | order by x
508 | ) cells
509 | from conway
510 | group by generation, y
511 | order by generation, y;
512 |
513 | /* ***************************************************** */
514 |
--------------------------------------------------------------------------------
/Chapter 7/ch07_unpivoting_columns_to_rows.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch07_unpivoting_columns_to_rows.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 7
10 | Unpivoting Columns to Rows
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
19 | set pagesize 80
20 | set linesize 80
21 | set sqlformat ansiconsole
22 | alter session set nls_date_format = 'YYYY-MM-DD';
23 |
24 | /* -----------------------------------------------------
25 | Chapter 7 example code
26 | ----------------------------------------------------- */
27 |
28 | -- Listing 7-1. Daily web visits per device
29 |
30 | select day, pc, tablet, phone
31 | from web_devices
32 | order by day;
33 |
34 | -- Listing 7-2. Using unpivot to get dimension and measure
35 |
36 | select day, device, cnt
37 | from web_devices
38 | unpivot (
39 | cnt
40 | for device
41 | in (
42 | pc as 'PC'
43 | , tablet as 'Tablet'
44 | , phone as 'Phone'
45 | )
46 | )
47 | order by day, device;
48 |
49 | -- Listing 7-3. Manual unpivot using numbered row generator
50 |
51 | select
52 | wd.day
53 | , case r.rn
54 | when 1 then 'PC'
55 | when 2 then 'Tablet'
56 | when 3 then 'Phone'
57 | end as device
58 | , case r.rn
59 | when 1 then wd.pc
60 | when 2 then wd.tablet
61 | when 3 then wd.phone
62 | end as cnt
63 | from web_devices wd
64 | cross join (
65 | select level as rn from dual connect by level <= 3
66 | ) r
67 | order by day, device;
68 |
69 | -- Listing 7-4. Manual unpivot using dimension style row generator
70 |
71 | with devices( device ) as (
72 | select 'PC' from dual union all
73 | select 'Tablet' from dual union all
74 | select 'Phone' from dual
75 | )
76 | select
77 | wd.day
78 | , d.device
79 | , case d.device
80 | when 'PC' then wd.pc
81 | when 'Tablet' then wd.tablet
82 | when 'Phone' then wd.phone
83 | end as cnt
84 | from web_devices wd
85 | cross join devices d
86 | order by day, device;
87 |
88 | -- Listing 7-5. Daily web visits and purchases per gender and channel
89 |
90 | select
91 | day
92 | , m_tw_cnt
93 | , m_tw_qty
94 | , m_fb_cnt
95 | , m_fb_qty
96 | , f_tw_cnt
97 | , f_tw_qty
98 | , f_fb_cnt
99 | , f_fb_qty
100 | from web_demographics
101 | order by day;
102 |
-- Listing 7-6. Using unpivot with two dimensions and two measures
-- Each in-list entry maps a pair of source columns to a pair of
-- dimension values, yielding 4 rows (gender x channel) per day.

select day, gender, channel, cnt, qty
from web_demographics
unpivot (
   ( cnt, qty )
   for ( gender, channel )
   in (
      (m_tw_cnt, m_tw_qty) as ('Male' , 'Twitter' )
    , (m_fb_cnt, m_fb_qty) as ('Male' , 'Facebook')
    , (f_tw_cnt, f_tw_qty) as ('Female', 'Twitter' )
    , (f_fb_cnt, f_fb_qty) as ('Female', 'Facebook')
   )
)
order by day, gender, channel;

-- Listing 7-7. Using unpivot with one composite dimension and two measures
-- Both dimensions are collapsed into a single descriptive string.

select day, gender_and_channel, cnt, qty
from web_demographics
unpivot (
   ( cnt, qty )
   for gender_and_channel
   in (
      (m_tw_cnt, m_tw_qty) as 'Male on Twitter'
    , (m_fb_cnt, m_fb_qty) as 'Male on Facebook'
    , (f_tw_cnt, f_tw_qty) as 'Female on Twitter'
    , (f_fb_cnt, f_fb_qty) as 'Female on Facebook'
   )
)
order by day, gender, cnt, qty;

-- Listing 7-8. Using unpivot with one single dimension and two measures
-- Repeating 'Male'/'Female' keeps one row per source column pair;
-- unpivot itself does NOT aggregate the duplicates (see 7-9).

select day, gender, cnt, qty
from web_demographics
unpivot (
   ( cnt, qty )
   for gender
   in (
      (m_tw_cnt, m_tw_qty) as 'Male'
    , (m_fb_cnt, m_fb_qty) as 'Male'
    , (f_tw_cnt, f_tw_qty) as 'Female'
    , (f_fb_cnt, f_fb_qty) as 'Female'
   )
)
order by day, gender;
150 |
-- Listing 7-9. Using unpivot with one aggregated dimension and two measures
-- Like Listing 7-8 but with an explicit group by, so the two rows
-- per gender (Twitter + Facebook) are summed into one.

select day
 , gender
 , sum(cnt) as cnt
 , sum(qty) as qty
from web_demographics
unpivot (
   ( cnt, qty )
   for gender
   in (
      (m_tw_cnt, m_tw_qty) as 'Male'
    , (m_fb_cnt, m_fb_qty) as 'Male'
    , (f_tw_cnt, f_tw_qty) as 'Female'
    , (f_fb_cnt, f_fb_qty) as 'Female'
   )
)
group by day, gender
order by day, gender;

-- Listing 7-10. Using unpivot with two dimensions and one measure
-- Single measure (cnt only), so each in-list entry is a single
-- column mapped to a pair of dimension values.

select day, gender, channel, cnt
from web_demographics
unpivot (
   cnt
   for ( gender, channel )
   in (
      m_tw_cnt as ('Male' , 'Twitter' )
    , m_fb_cnt as ('Male' , 'Facebook')
    , f_tw_cnt as ('Female', 'Twitter' )
    , f_fb_cnt as ('Female', 'Facebook')
   )
)
order by day, gender, channel;
186 |
-- Listing 7-11. Dimension table for gender
-- letter ('M'/'F') is the key used in the column-name encoding.

select letter, name
from gender_dim
order by letter;

-- Listing 7-12. Dimension table for channels
-- shortcut ('tw'/'fb') is the key used in the column-name encoding.

select id, name, shortcut
from channels_dim
order by id;

-- Listing 7-13. Manual unpivot using dimension tables
-- Cross joins web_demographics with both dimension tables
-- (2 x 2 = 4 rows per day); the nested case expressions pick the
-- source column matching each gender/channel combination.

select
   d.day
 , g.letter as g_id
 , c.id as ch_id
 , case g.letter
      when 'M' then
         case c.shortcut
            when 'tw' then d.m_tw_cnt
            when 'fb' then d.m_fb_cnt
         end
      when 'F' then
         case c.shortcut
            when 'tw' then d.f_tw_cnt
            when 'fb' then d.f_fb_cnt
         end
   end as cnt
 , case g.letter
      when 'M' then
         case c.shortcut
            when 'tw' then d.m_tw_qty
            when 'fb' then d.m_fb_qty
         end
      when 'F' then
         case c.shortcut
            when 'tw' then d.f_tw_qty
            when 'fb' then d.f_fb_qty
         end
   end as qty
from web_demographics d
cross join gender_dim g
cross join channels_dim c
order by day, g_id, ch_id;
233 |
-- Listing 7-14. Preparing column names mapped to dimension values
-- Builds candidate cnt/qty column names from the gender and channel
-- dimension tables, then joins user_tab_columns twice to keep only
-- combinations where BOTH generated columns really exist in
-- web_demographics.

select
   s.cnt_col, s.qty_col
 , s.g_id, s.gender
 , s.ch_id, s.channel
from (
   select
      lower(
         g.letter || '_' || c.shortcut || '_cnt'
      ) as cnt_col
    , lower(
         g.letter || '_' || c.shortcut || '_qty'
      ) as qty_col
    , g.letter as g_id
    , g.name as gender
    , c.id as ch_id
    , c.name as channel
   from gender_dim g
   cross join channels_dim c
) s
join user_tab_columns cnt_c
   on cnt_c.column_name = upper(s.cnt_col)
join user_tab_columns qty_c
   on qty_c.column_name = upper(s.qty_col) -- fix: was upper(s.cnt_col), which validated the cnt column twice and the qty column never
where cnt_c.table_name = 'WEB_DEMOGRAPHICS'
and qty_c.table_name = 'WEB_DEMOGRAPHICS'
order by gender, channel;
262 |
-- Listing 7-15. Dynamically building unpivot query
-- Loops over the validated column/dimension mapping of Listing 7-14
-- and concatenates a complete unpivot statement, which is printed
-- and then opened as the bind refcursor :unpivoted.

set serveroutput on

variable unpivoted refcursor

declare
   v_unpivot_sql varchar2(4000);
begin
   for c in (
      select
         s.cnt_col, s.qty_col
       , s.g_id, s.gender
       , s.ch_id, s.channel
      from (
         select
            lower(
               g.letter || '_' || c.shortcut || '_cnt'
            ) as cnt_col
          , lower(
               g.letter || '_' || c.shortcut || '_qty'
            ) as qty_col
          , g.letter as g_id
          , g.name as gender
          , c.id as ch_id
          , c.name as channel
         from gender_dim g
         cross join channels_dim c
      ) s
      join user_tab_columns cnt_c
         on cnt_c.column_name = upper(s.cnt_col)
      join user_tab_columns qty_c
         on qty_c.column_name = upper(s.qty_col) -- fix: was upper(s.cnt_col); qty columns were never validated
      where cnt_c.table_name = 'WEB_DEMOGRAPHICS'
      and qty_c.table_name = 'WEB_DEMOGRAPHICS'
      order by gender, channel
   ) loop

      -- First iteration emits the statement preamble;
      -- later iterations just emit the in-list separator.
      if v_unpivot_sql is null then
         v_unpivot_sql := q'[
select day, g_id, ch_id, cnt, qty
from web_demographics
unpivot (
   ( cnt, qty )
   for ( g_id, ch_id )
   in (
      ]';
      else
         v_unpivot_sql := v_unpivot_sql || q'[
    , ]';
      end if;

      -- One in-list entry per mapping: (cnt_col, qty_col) as ('g', ch).
      -- g_id is a string (quoted), ch_id a number (unquoted).
      v_unpivot_sql := v_unpivot_sql
         || '(' || c.cnt_col
         || ', ' || c.qty_col
         || ') as (''' || c.g_id
         || ''', ' || c.ch_id
         || ')';

   end loop;

   v_unpivot_sql := v_unpivot_sql || q'[
   )
)
order by day, g_id, ch_id]';

   dbms_output.put_line(v_unpivot_sql);

   open :unpivoted for v_unpivot_sql;
end;
/

print unpivoted
336 |
337 | /* ***************************************************** */
338 |
--------------------------------------------------------------------------------
/Chapter 8/ch08_pivoting_rows_to_columns.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch08_pivoting_rows_to_columns.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 8
10 | Pivoting Rows to Columns
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
-- sqlcl session settings: page/line width, ANSI console output,
-- ISO date format for the session.
set pagesize 80
set linesize 80
set sqlformat ansiconsole
alter session set nls_date_format = 'YYYY-MM-DD';
23 |
24 | /* -----------------------------------------------------
25 | Chapter 8 example code
26 | ----------------------------------------------------- */
27 |
-- Listing 8-2. Yearly purchased quantities by brewery and product group
-- Base aggregation for the pivot examples: one row per
-- brewery / group / year with the summed quantity.

select
   brewery_name
 , group_name
 , extract(year from purchased) as yr
 , sum(qty) as qty
from purchases_with_dims pwd
group by
   brewery_name
 , group_name
 , extract(year from purchased)
order by
   brewery_name
 , group_name
 , yr;
44 |
-- Listing 8-3. Pivoting the year rows into columns
-- The inline view pre-aggregates; pivot then turns the yr values
-- 2016-2018 into columns y2016..y2018 holding sum(qty).

select *
from (
   select
      brewery_name
    , group_name
    , extract(year from purchased) as yr
    , sum(qty) as qty
   from purchases_with_dims pwd
   group by
      brewery_name
    , group_name
    , extract(year from purchased)
) pivot (
   sum(qty)
   for yr
   in (
      2016 as y2016
    , 2017 as y2017
    , 2018 as y2018
   )
)
order by brewery_name, group_name;
69 |
-- Listing 8-4. Utilizing the implicit group by
-- No group by in the inline view: pivot implicitly groups by every
-- column not named as measure or dimension (brewery_name, group_name).

select *
from (
   select
      brewery_name
    , group_name
    , extract(year from purchased) as yr
    , qty
   from purchases_with_dims pwd
) pivot (
   sum(qty)
   for yr
   in (
      2016 as y2016
    , 2017 as y2017
    , 2018 as y2018
   )
)
order by brewery_name, group_name;
90 |
-- Listing 8-5. Manual pivoting without using pivot clause
-- Classic conditional aggregation: one searched case expression per
-- target year column; rows of other years contribute null, which
-- sum() ignores.

select
   brewery_name
 , group_name
 , sum(
      case
         when extract(year from purchased) = 2016 then qty
      end
   ) as y2016
 , sum(
      case
         when extract(year from purchased) = 2017 then qty
      end
   ) as y2017
 , sum(
      case
         when extract(year from purchased) = 2018 then qty
      end
   ) as y2018
from purchases_with_dims pwd
group by
   brewery_name
 , group_name
order by brewery_name, group_name;
116 |
-- Listing 8-6. Getting an ORA-00918 error with multiple measures
-- NOTE: this statement is MEANT to fail. Two unaliased measures make
-- pivot generate duplicate column names (e.g. 2016 twice) -> ORA-00918.

select *
from (
   select
      brewery_name
    , group_name
    , extract(year from purchased) as yr
    , qty
    , cost
   from purchases_with_dims pwd
) pivot (
   sum(qty)
 , sum(cost)
   for yr
   in (
      2016 as y2016
    , 2017 as y2017
    , 2018 as y2018
   )
)
order by brewery_name, group_name;

-- Fixing it with measure aliases
-- Measure aliases q/c combine with the in-list aliases to give
-- unique column names: 16_Q, 16_C, 17_Q, ... (quoted "16" etc.).

select *
from (
   select
      brewery_name
    , group_name
    , extract(year from purchased) as yr
    , qty
    , cost
   from purchases_with_dims pwd
) pivot (
   sum(qty) as q
 , sum(cost) as c
   for yr
   in (
      2016 as "16"
    , 2017 as "17"
    , 2018 as "18"
   )
)
order by brewery_name, group_name;
162 |
-- Listing 8-7. Combining two dimensions and two measures
-- The where clause restricts the input to exactly the group/year
-- combinations listed in the pivot in-list (half-open date ranges).

select *
from (
   select
      brewery_name
    , group_name
    , extract(year from purchased) as yr
    , qty
    , cost
   from purchases_with_dims pwd
   where group_name in ('IPA', 'Wheat')
   and purchased >= date '2017-01-01'
   and purchased < date '2019-01-01'
) pivot (
   sum(qty) as q
 , sum(cost) as c
   for (group_name, yr)
   in (
      ('IPA' , 2017) as i17
    , ('IPA' , 2018) as i18
    , ('Wheat', 2017) as w17
    , ('Wheat', 2018) as w18
   )
)
order by brewery_name;

-- Same output without where clause, but bad idea
-- Pivot discards non-matching combinations anyway, but without the
-- where clause the entire table is scanned and aggregated first.

select *
from (
   select
      brewery_name
    , group_name
    , extract(year from purchased) as yr
    , qty
    , cost
   from purchases_with_dims pwd
) pivot (
   sum(qty) as q
 , sum(cost) as c
   for (group_name, yr)
   in (
      ('IPA' , 2017) as i17
    , ('IPA' , 2018) as i18
    , ('Wheat', 2017) as w17
    , ('Wheat', 2018) as w18
   )
)
order by brewery_name;
213 |
214 | /* ***************************************************** */
215 |
--------------------------------------------------------------------------------
/Chapter 9/ch09_splitting_delimited_text.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | ch09_splitting_delimited_text.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Chapter 9
10 | Splitting Delimited Text
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | sqlcl formatting setup
17 | ----------------------------------------------------- */
18 |
-- sqlcl session settings: page/line width, ANSI console output,
-- ISO date format for the session.
set pagesize 80
set linesize 80
set sqlformat ansiconsole

alter session set nls_date_format = 'YYYY-MM-DD';
24 |
25 | /* -----------------------------------------------------
26 | Chapter 9 example code
27 | ----------------------------------------------------- */
28 |
-- Listing 9-1. Comma-delimited content of customer_favorites table
-- favorite_list holds product ids as a single comma-delimited string
-- (the antipattern the rest of the chapter splits apart).

select customer_id, favorite_list
from customer_favorites
order by customer_id;
34 |
-- Listing 9-3. Using pipelined table function to split string
-- Correlated table() call: one collection row per delimited element;
-- the element surfaces as the generic column_value.

select
   cf.customer_id
 , fl.column_value as product_id
from customer_favorites cf
, table(
     favorite_list_to_coll_type(cf.favorite_list)
  ) fl
order by cf.customer_id, fl.column_value;

-- Including customers with no favorites
-- Old-style (+) outer join on the collection keeps customers whose
-- favorite_list is null.

select
   cf.customer_id
 , fl.column_value as product_id
from customer_favorites cf
, table(
     favorite_list_to_coll_type(cf.favorite_list)
  )(+) fl
order by cf.customer_id, fl.column_value;
56 |
-- Listing 9-4. Join the results of the splitting to products
-- cross apply = correlated inner join to the split collection.

select
   cf.customer_id as c_id
 , c.name as cust_name
 , fl.column_value as p_id
 , p.name as prod_name
from customer_favorites cf
cross apply table(
   favorite_list_to_coll_type(cf.favorite_list)
) fl
join customers c
   on c.id = cf.customer_id
join products p
   on p.id = fl.column_value
order by cf.customer_id, fl.column_value;

-- Including customers with no favorites - ANSI style
-- outer apply + left join preserve customers with null lists.

select
   cf.customer_id as c_id
 , c.name as cust_name
 , fl.column_value as p_id
 , p.name as prod_name
from customer_favorites cf
outer apply table(
   favorite_list_to_coll_type(cf.favorite_list)
) fl
join customers c
   on c.id = cf.customer_id
left outer join products p
   on p.id = fl.column_value
order by cf.customer_id, fl.column_value;
90 |
-- Listing 9-5. Splitting with apex_string.split
-- apex_string.split returns varchar2 elements, hence the to_number
-- conversions before joining to products.

select
   cf.customer_id as c_id
 , c.name as cust_name
 , to_number(fl.column_value) as p_id
 , p.name as prod_name
from customer_favorites cf
cross apply table(
   apex_string.split(cf.favorite_list, ',')
) fl
join customers c
   on c.id = cf.customer_id
join products p
   on p.id = to_number(fl.column_value)
order by cf.customer_id, p_id;

-- Including customers with no favorites - ANSI style

select
   cf.customer_id as c_id
 , c.name as cust_name
 , to_number(fl.column_value) as p_id
 , p.name as prod_name
from customer_favorites cf
outer apply table(
   apex_string.split(cf.favorite_list, ',')
) fl
join customers c
   on c.id = cf.customer_id
left outer join products p
   on p.id = to_number(fl.column_value)
order by cf.customer_id, p_id;
124 |
-- Listing 9-6. Generating as many rows as delimiter count
-- The lateral row generator produces sub# = 1..(delimiters + 1);
-- regexp_substr then extracts the sub#'th element.

select
   favs.customer_id as c_id
 , c.name as cust_name
 , favs.product_id as p_id
 , p.name as prod_name
from (
   select
      cf.customer_id
    , to_number(
         regexp_substr(cf.favorite_list, '[^,]+', 1, sub#)
      ) as product_id
   from customer_favorites cf
   cross join lateral(
      select level sub#
      from dual
      connect by level <= regexp_count(cf.favorite_list, ',') + 1
   ) fl
) favs
join customers c
   on c.id = favs.customer_id
join products p
   on p.id = favs.product_id
order by favs.customer_id, favs.product_id;

-- Handling if there could have been blanks in the string
-- '[^,]+' skips empty elements; this variant anchors on the
-- delimiter instead and takes capture group 2, so ',,' yields null.

select
   favs.customer_id as c_id
 , c.name as cust_name
 , favs.product_id as p_id
 , p.name as prod_name
from (
   select
      cf.customer_id
    , to_number(
         regexp_substr(
            cf.favorite_list
          , '(^|,)([^,]*)'
          , 1
          , sub#
          , null
          , 2
         )
      ) as product_id
   from customer_favorites cf
   cross join lateral(
      select level sub#
      from dual
      connect by level <= regexp_count(cf.favorite_list, ',') + 1
   ) fl
) favs
join customers c
   on c.id = favs.customer_id
join products p
   on p.id = favs.product_id
order by favs.customer_id, favs.product_id;
183 |
-- Listing 9-7. Treating the string as a JSON array
-- Wrapping the list in [ ] makes it a JSON array of numbers that
-- json_table can unnest with real column names.

select
   cf.customer_id as c_id
 , c.name as cust_name
 , fl.product_id as p_id
 , p.name as prod_name
from customer_favorites cf
outer apply json_table(
   '[' || cf.favorite_list || ']'
 , '$[*]'
   columns (
      product_id number path '$'
   )
) fl
join customers c
   on c.id = cf.customer_id
left outer join products p
   on p.id = fl.product_id
order by cf.customer_id, fl.product_id;
204 |
-- Listing 9-8. Comma- and colon-delimited content of customer_reviews table
-- review_list holds "product_id:score" pairs separated by commas.

select customer_id, review_list
from customer_reviews
order by customer_id;
210 |
-- Listing 9-10. Using the ODCI table function to parse the delimited data
-- delimited_col_row.parser takes the text, a column spec, the
-- column delimiter (':') and the row delimiter (',').

select cr.customer_id, rl.product_id, rl.score
from customer_reviews cr
outer apply table (
   delimited_col_row.parser(
      cr.review_list
    , 'PRODUCT_ID:NUMBER,SCORE:VARCHAR2(1)'
    , ':'
    , ','
   )
) rl
order by cr.customer_id, rl.product_id;
224 |
-- Listing 9-11. Joining with real column names instead of generic column_value
-- The parser exposes typed columns product_id and score directly.

select
   cr.customer_id as c_id
 , c.name as cust_name
 , rl.product_id as p_id
 , p.name as prod_name
 , rl.score
from customer_reviews cr
cross apply table (
   delimited_col_row.parser(
      cr.review_list
    , 'PRODUCT_ID:NUMBER,SCORE:VARCHAR2(1)'
    , ':'
    , ','
   )
) rl
join customers c
   on c.id = cr.customer_id
join products p
   on p.id = rl.product_id
order by cr.customer_id, rl.product_id;
247 |
-- Listing 9-12. Getting rows with apex_string.split and columns with substr
-- Rows come from splitting on ','; within each "id:score" element,
-- substr/instr around ':' separate the two columns.

select
   cr.customer_id as c_id
 , c.name as cust_name
 , p.id as p_id
 , p.name as prod_name
 , substr(
      rl.column_value
    , instr(rl.column_value, ':') + 1
   ) as score
from customer_reviews cr
cross apply table(
   apex_string.split(cr.review_list, ',')
) rl
join customers c
   on c.id = cr.customer_id
join products p
   on p.id = to_number(
               substr(
                  rl.column_value
                , 1
                , instr(rl.column_value, ':') - 1
             ))
order by cr.customer_id, p_id;
273 |
-- Listing 9-13. Generating as many rows as delimiter count
-- Same row-generator trick as Listing 9-6, with two regexp_substr
-- calls per element: group 2 after '^|,' gives product_id,
-- group 1 before ',|$' gives score.

select
   revs.customer_id as c_id
 , c.name as cust_name
 , revs.product_id as p_id
 , p.name as prod_name
 , revs.score
from (
   select
      cr.customer_id
    , to_number(
         regexp_substr(
            cr.review_list
          , '(^|,)([^:,]*)'
          , 1
          , sub#
          , null
          , 2
         )
      ) as product_id
    , regexp_substr(
         cr.review_list
       , '([^:,]*)(,|$)'
       , 1
       , sub#
       , null
       , 1
      ) as score
   from customer_reviews cr
   cross join lateral(
      select level sub#
      from dual
      connect by level <= regexp_count(cr.review_list, ',') + 1
   ) rl
) revs
join customers c
   on c.id = revs.customer_id
join products p
   on p.id = revs.product_id
order by revs.customer_id, revs.product_id;
315 |
-- Listing 9-14. Turning delimited text into JSON
-- Two nested replaces turn "id:score,id:score" into a JSON array of
-- two-element string arrays: [["id","score"],["id","score"]].

select
   customer_id
 , '[["'
   || replace(
         replace(
            review_list
          , ','
          , '"],["'
         )
       , ':'
       , '","'
      )
   || '"]]'
   as json_list
from customer_reviews
order by customer_id;
334 |
-- Listing 9-15. Parsing JSON with json_table
-- Builds the same JSON as Listing 9-14 inline and unnests it;
-- $[0] is the product id, $[1] the score.

select
   cr.customer_id as c_id
 , c.name as cust_name
 , rl.product_id as p_id
 , p.name as prod_name
 , rl.score
from customer_reviews cr
cross apply json_table (
   '[["'
   || replace(
         replace(
            cr.review_list
          , ','
          , '"],["'
         )
       , ':'
       , '","'
      )
   || '"]]'
 , '$[*]'
   columns (
      product_id number path '$[0]'
    , score varchar2(1) path '$[1]'
   )
) rl
join customers c
   on c.id = cr.customer_id
join products p
   on p.id = rl.product_id
order by cr.customer_id, rl.product_id;
367 |
-- Bonus: Using JSON objects instead of arrays
-- Builds [{"p":id,"r":"score"},...]; ',' is first swapped to '|'
-- so the two delimiters can be replaced independently, and nvl2
-- keeps the result null for customers with no reviews.

select
   cr.customer_id as c_id
 , c.name as cust_name
 , rl.product_id as p_id
 , p.name as prod_name
 , rl.score
from customer_reviews cr
cross apply json_table (
   nvl2(cr.review_list, '[{"p":', null)
   || replace(
         replace(
            replace(cr.review_list, ',', '|')
          , ':'
          , ',"r":"'
         )
       , '|'
       , '"},{"p":'
      )
   || nvl2(cr.review_list, '"}]', null)
 , '$[*]'
   columns (
      product_id number path '$.p'
    , score varchar2(1) path '$.r'
   )
) rl
join customers c
   on c.id = cr.customer_id
join products p
   on p.id = rl.product_id
order by cr.customer_id, rl.product_id;
400 |
-- Bonus: Turning delimited text into XML
-- Reconstructed: the XML literals had been stripped from this file
-- (angle-bracket text lost in extraction). Builds
-- <c><o p="id" r="score"/>...</c> from "id:score,id:score";
-- nvl2 keeps the <o> elements out when review_list is null,
-- matching the '/c/o' XPath and @p/@r attributes parsed below.

select
   customer_id
 , '<c>' || nvl2(review_list, '<o p="'
      || replace(
            replace(
               review_list
             , ','
             , '"/><o p="'
            )
          , ':'
          , '" r="'
         )
      || '"/>', null) || '</c>'
   as xml_list
from customer_reviews
order by customer_id;
419 |
-- Bonus: Parsing XML with xmltable
-- Reconstructed: the XML literals inside xmltype() had been stripped
-- from this file. Builds the same <c><o p r/></c> document as the
-- previous statement and unnests it: one row per /c/o element,
-- reading the p and r attributes as typed columns.

select
   cr.customer_id as c_id
 , c.name as cust_name
 , rl.product_id as p_id
 , p.name as prod_name
 , rl.score
from customer_reviews cr
outer apply xmltable (
   '/c/o'
   passing xmltype(
      '<c>' || nvl2(cr.review_list, '<o p="'
         || replace(
               replace(
                  cr.review_list
                , ','
                , '"/><o p="'
               )
             , ':'
             , '" r="'
            )
         || '"/>', null) || '</c>'
   )
   columns
      product_id number path '@p'
    , score varchar2(1) path '@r'
   ) rl
join customers c
   on c.id = cr.customer_id
left outer join products p
   on p.id = rl.product_id
order by cr.customer_id, rl.product_id;
453 |
454 | /* ***************************************************** */
455 |
--------------------------------------------------------------------------------
/Contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing to Apress Source Code
2 |
3 | Copyright for Apress source code belongs to the author(s). However, under fair use you are encouraged to fork and contribute minor corrections and updates for the benefit of the author(s) and other readers.
4 |
5 | ## How to Contribute
6 |
7 | 1. Make sure you have a GitHub account.
8 | 2. Fork the repository for the relevant book.
9 | 3. Create a new branch on which to make your change, e.g.
10 | `git checkout -b my_code_contribution`
11 | 4. Commit your change. Include a commit message describing the correction. Please note that if your commit message is not clear, the correction will not be accepted.
12 | 5. Submit a pull request.
13 |
14 | Thank you for your contribution!
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Freeware License, some rights reserved
2 |
3 | Copyright (c) 2020 Kim Berg Hansen
4 |
5 | Permission is hereby granted, free of charge, to anyone obtaining a copy
6 | of this software and associated documentation files (the "Software"),
7 | to work with the Software within the limits of freeware distribution and fair use.
8 | This includes the rights to use, copy, and modify the Software for personal use.
9 | Users are also allowed and encouraged to submit corrections and modifications
10 | to the Software for the benefit of other users.
11 |
12 | It is not allowed to reuse, modify, or redistribute the Software for
13 | commercial use in any way, or for a user’s educational materials such as books
14 | or blog articles without prior permission from the copyright holder.
15 |
16 | The above copyright notice and this permission notice need to be included
17 | in all copies or substantial portions of the software.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 | AUTHORS OR COPYRIGHT HOLDERS OR APRESS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 | SOFTWARE.
26 |
27 |
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Apress Source Code
2 |
3 | This repository accompanies [*Practical Oracle SQL*](https://www.apress.com/9781484256169) by Kim Berg Hansen (Apress, 2020).
4 |
5 | [comment]: #cover
6 | 
7 |
8 | Download the files as a zip using the green button, or clone the repository to your machine using Git.
9 |
10 | ## Releases
11 |
12 | Release v1.0 corresponds to the code in the published book, without corrections or updates.
13 |
14 | ## Contributions
15 |
16 | See the file Contributing.md for more information on how you can contribute to this repository.
--------------------------------------------------------------------------------
/practical_clean_schema.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | practical_clean_schema.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Drop objects in schema PRACTICAL
10 | Tables, data and other objects
11 |
12 | To be executed in schema PRACTICAL
13 | ** ***************************************************** */
14 |
15 | /* -----------------------------------------------------
16 | Drop views
17 | ----------------------------------------------------- */
18 |
-- Drop order matters: views before the objects they reference,
-- types with force, child tables before parents (plus cascade
-- constraints as a safety net).
drop view customer_order_products_obj;
drop view customer_order_products;

drop package formulas;

drop view purchases_with_dims;

drop view brewery_products;

drop view total_sales;
drop view yearly_sales;

drop view inventory_with_dims;

drop view inventory_totals;
drop view monthly_orders;

drop view emp_hire_periods_with_name;

drop view web_page_counter_hist;

/* -----------------------------------------------------
   Drop packages, functions and procedures
   ----------------------------------------------------- */

-- NOTE(review): product_alcohol_bac is a view but sits in this
-- section; harmless, just mis-filed.
drop view product_alcohol_bac;

drop function favorite_list_to_coll_type;

drop function stragg;
drop function name_coll_type_to_varchar2;

/* -----------------------------------------------------
   Drop types and type bodies
   ----------------------------------------------------- */

drop type id_name_coll_type force;
drop type id_name_type force;

drop type favorite_coll_type force;
drop type delimited_col_row force;

drop type stragg_type force;
drop type stragg_expr_type force;
drop type name_coll_type force;

/* -----------------------------------------------------
   Drop tables
   ----------------------------------------------------- */

drop table conway_gen_zero cascade constraints purge;

drop table customer_favorites cascade constraints purge;
drop table customer_reviews cascade constraints purge;

drop table product_alcohol cascade constraints purge;

drop table web_devices cascade constraints purge;
drop table web_demographics cascade constraints purge;
drop table channels_dim cascade constraints purge;
drop table gender_dim cascade constraints purge;

drop table picking_log cascade constraints purge;
drop table picking_line cascade constraints purge;
drop table picking_list cascade constraints purge;

drop table orderlines cascade constraints purge;
drop table orders cascade constraints purge;
drop table inventory cascade constraints purge;
drop table locations cascade constraints purge;
drop table customers cascade constraints purge;

drop table purchases cascade constraints purge;
drop table breweries cascade constraints purge;

drop table monthly_budget cascade constraints purge;
drop table product_minimums cascade constraints purge;

drop table monthly_sales cascade constraints purge;
drop table products cascade constraints purge;
drop table product_groups cascade constraints purge;

drop table packaging_relations cascade constraints purge;
drop table packaging cascade constraints purge;

drop table ticker cascade constraints purge;
drop table stock cascade constraints purge;

drop table server_heartbeat cascade constraints purge;
drop table web_page_visits cascade constraints purge;

drop table web_counter_hist cascade constraints purge;
drop table web_pages cascade constraints purge;
drop table web_apps cascade constraints purge;

drop table emp_hire_periods cascade constraints purge;

drop table employees cascade constraints purge;

-- Everything is now dropped
119 |
120 | /* ***************************************************** */
121 |
--------------------------------------------------------------------------------
/practical_create_schema.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | practical_create_schema.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Creation of the user/schema PRACTICAL
10 | Granting the necessary privileges to this schema
11 |
12 | To be executed as a DBA user
13 | ** ***************************************************** */
14 |
-- Create the demo schema owner.
-- NOTE(review): hard-coded trivial password is fine for a local
-- sandbox only — change it on any shared instance.
create user practical
   identified by practical
   default tablespace users
   temporary tablespace temp;

alter user practical quota unlimited on users;

-- Minimum privileges the chapter scripts need: logon plus creation
-- of tables, views, types and PL/SQL program units.
grant create session to practical;
grant create table to practical;
grant create view to practical;
grant create type to practical;
grant create procedure to practical;
27 |
28 | /* ***************************************************** */
29 |
--------------------------------------------------------------------------------
/practical_drop_schema.sql:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | practical_drop_schema.sql
3 |
4 | Companion script for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use at your own risk
7 | *****************************************************
8 |
9 | Drop the user/schema PRACTICAL
10 | To be executed as a DBA user
11 | ** ***************************************************** */
12 |
-- Removes the PRACTICAL user and every object it owns in one statement.
drop user practical cascade;
14 |
15 | /* ***************************************************** */
16 |
--------------------------------------------------------------------------------
/practical_readme.txt:
--------------------------------------------------------------------------------
1 | /* ***************************************************** **
2 | practical_readme.txt
3 |
4 | README for scripts for Practical Oracle SQL, Apress 2020
5 | by Kim Berg Hansen, https://www.kibeha.dk
6 | Use the scripts at your own risk
7 |
8 | ** ***************************************************** */
9 |
10 | These scripts are companions to the book Practical Oracle SQL.
11 | Detailed explanations of the scripts are found in the book.
12 |
13 | Everything is placed in a schema called PRACTICAL
14 | (somewhat like Oracle sample schemas SCOTT and HR.)
15 |
16 | There are four scripts for managing this schema:
17 |
18 | - practical_create_schema.sql
19 |
20 | To be run as a DBA user.
21 | This creates the schema/user PRACTICAL
22 | and grants the necessary privileges to it.
23 |
24 | - practical_fill_schema.sql
25 |
26 | To be run as user PRACTICAL.
27 | This creates all tables, objects and sample data.
28 |
29 | - practical_clean_schema.sql
30 |
31 | To be run as user PRACTICAL.
32 | This drops all tables, objects and sample data.
33 |
34 | - practical_drop_schema.sql
35 |
36 | To be run as a DBA user.
37 | This drops the schema PRACTICAL with all content.
38 |
39 |
And then each chapter has a script ch{NN}_{chaptername}.sql,
where NN is the two-digit chapter number.
41 |
42 | To get going, run practical_create_schema.sql
43 | followed by practical_fill_schema.sql.
44 |
45 | Then you can try out all the chapter scripts
46 | as you read the chapters in the book.
47 |
If you get the data mangled in the tables along the way,
you can get a clean sheet by running practical_clean_schema.sql
followed by practical_fill_schema.sql.
51 |
52 | When you don't want the schema anymore,
53 | run practical_drop_schema.sql.
54 |
55 |
56 | Enjoy the book and play with the code.
57 | Above all, have fun.
58 |
59 | /Kim Berg Hansen
60 |
61 | /* ***************************************************** */
62 |
--------------------------------------------------------------------------------