├── CONTRIBUTING.md
├── LICENCE.md
├── README.md
├── att_clu_zone_map
│   ├── 01_setup12c.sql
│   ├── 02_table_create.sql
│   ├── 03_dim_fill.sql
│   ├── 04_source_fill.sql
│   ├── 05_create_fact.sql
│   ├── 06_ac_only.sql
│   ├── 07_zm_lin_inter.sql
│   ├── 08_zm_prune.sql
│   ├── 09_part_zm.sql
│   ├── 10_zm_maint.sql
│   ├── README.md
│   └── spooled_output
│       ├── 05_create_fact.lst
│       ├── 06_ac_only.lst
│       ├── 07_zm_lin_inter.lst
│       ├── 08_zm_prune.lst
│       ├── 09_part_zm.lst
│       └── 10_zm_maint.lst
├── bulk_load
│   ├── 01_dirs.sql
│   ├── 02_table.sql
│   ├── 03_etable.sql
│   ├── 04_etablez.sql
│   ├── 05_makedat.sh
│   ├── 06_ins.lst
│   ├── 06_ins.sql
│   ├── 07_insz.lst
│   ├── 07_insz.sql
│   └── README.md
├── compare_ofe
│   ├── README.md
│   └── ofe.sql
├── db_load
│   ├── InsertExamples.java
│   ├── README.md
│   ├── example_output.txt
│   ├── insert_example.py
│   └── table.sql
├── direct_path
│   ├── 11g
│   │   ├── README.md
│   │   ├── equi.lst
│   │   ├── equi.sql
│   │   ├── merge.lst
│   │   ├── merge.sql
│   │   ├── orcl_ora_21482_TSM.trc
│   │   ├── orcl_ora_21537_EQUI.trc
│   │   ├── show_type.sh
│   │   ├── tsm.lst
│   │   ├── tsm.sql
│   │   ├── tsm_v_tsmhwmb.lst
│   │   └── tsm_v_tsmhwmb.sql
│   ├── 12c
│   │   ├── README.md
│   │   ├── equi.lst
│   │   ├── equi.sql
│   │   ├── hwm.lst
│   │   ├── hwm.sql
│   │   ├── hwmb.lst
│   │   ├── hwmb.sql
│   │   ├── merge.lst
│   │   ├── merge.sql
│   │   ├── tsm_v_tsmhwmb.lst
│   │   └── tsm_v_tsmhwmb.sql
│   └── README.md
├── new_synopses
│   ├── 01_mixed.lst
│   ├── 01_mixed.sql
│   ├── 02_default.lst
│   ├── 02_default.sql
│   ├── 03_hll.lst
│   ├── 03_hll.sql
│   ├── 04_defaultmx.lst
│   ├── 04_defaultmx.sql
│   ├── 05_defaultmx.lst
│   ├── 05_defaultmx.sql
│   ├── 06_defaultmx.lst
│   ├── 06_defaultmx.sql
│   ├── 07_defaultmx.lst
│   ├── 07_defaultmx.sql
│   ├── 08_nomix.lst
│   ├── 08_nomix.sql
│   ├── README.md
│   ├── check.sql
│   ├── t1check.sql
│   ├── test1.lst
│   ├── test1.sql
│   ├── test2.lst
│   ├── test2.sql
│   ├── test3.lst
│   ├── test3.sql
│   ├── test4.lst
│   └── test4.sql
├── partition_exchange_load
│   ├── README.md
│   ├── check_hist.sql
│   ├── example.lst
│   ├── example.sql
│   └── list_s.sql
├── px_wm_wp
│   ├── README.md
│   ├── dbrm_example.sql
│   ├── logon.sql
│   └── px_dbrm_queries.sql
├── spm
│   ├── README.md
│   └── inmem
│       ├── README.md
│       ├── part1
│       │   ├── all.lst
│       │   ├── all.sql
│       │   ├── auto.sql
│       │   ├── autoo.sql
│       │   ├── base.sql
│       │   ├── drop_base.sql
│       │   ├── in.sql
│       │   ├── noin.sql
│       │   ├── plan.sql
│       │   ├── query.sql
│       │   ├── seg.sql
│       │   └── tab.sql
│       ├── part2
│       │   ├── base.sql
│       │   ├── drop_base.sql
│       │   ├── example.lst
│       │   ├── flush.sql
│       │   ├── in.sql
│       │   ├── load.sql
│       │   ├── noin.sql
│       │   ├── plan.sql
│       │   ├── seg.sql
│       │   ├── show.sql
│       │   ├── sql.sql
│       │   ├── t1.sql
│       │   └── tab.sql
│       └── part3
│           ├── auto.sql
│           ├── autoo.sql
│           ├── base.sql
│           ├── drop_base.sql
│           ├── evo2.sql
│           ├── flush.sql
│           ├── in.sql
│           ├── noin.sql
│           ├── plan.sql
│           ├── q3.sql
│           ├── results.lst
│           ├── seg.sql
│           ├── show.sql
│           └── tab.sql
├── spm_in_practice
│   ├── README.md
│   ├── bplan.sql
│   ├── con
│   ├── example.lst
│   ├── example.sql
│   ├── load.sql
│   ├── q1.sql
│   ├── table.sql
│   ├── user.sql
│   └── util
│       ├── dropu.sql
│       ├── evo.sql
│       ├── evou.sql
│       ├── list.sql
│       ├── listu.sql
│       ├── noact.sql
│       ├── nomatch.sql
│       ├── nomatchu.sql
│       ├── plan.sql
│       ├── spmhint.sql
│       └── top.sql
├── spm_on_11g
│   ├── README.md
│   ├── base.sql
│   ├── con
│   ├── drop.sql
│   ├── evo.sql
│   ├── evolve.lst
│   ├── flush.sql
│   ├── introduction.lst
│   ├── loadc.sql
│   ├── loads.sql
│   ├── make_tab.sql
│   ├── makeset.sql
│   ├── p.sql
│   ├── plan.sql
│   ├── q1.sql
│   ├── q2.sql
│   ├── q3.sql
│   ├── show.sql
│   ├── sig.sql
│   ├── sql.sql
│   ├── sts.lst
│   ├── tab.sql
│   └── user.sql
├── tpcds_etabs
│   ├── README.md
│   ├── tpcds_etabs.sql
│   └── tpcds_makex.sql
└── upgrading_to_12c
    ├── README.md
    ├── dpump_copy
    │   ├── README.md
    │   ├── after_dpcopy.lst
    │   ├── after_gather_s2.lst
    │   ├── before_dpcopy.lst
    │   ├── dp_copy
    │   ├── dp_copy.log
    │   ├── gather_s2.sql
    │   ├── look.sql
    │   ├── users.lst
    │   └── users.sql
    ├── duplicate
    │   ├── README.md
    │   ├── copy_ext.sql
    │   ├── copy_hist.sql
    │   └── copy_hist_a.sql
    └── show_stats
        ├── README.md
        ├── show_ext.sql
        ├── show_hist.sql
        ├── show_hist_11g.sql
        ├── show_spd.sql
        └── show_usage.sql
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to DW-VLDB
2 |
3 | *Copyright (c) 2014, 2016 Oracle and/or its affiliates
4 | The Universal Permissive License (UPL), Version 1.0*
5 |
6 | **Pull requests are currently not being accepted for the Oracle DW-VLDB project.**
7 |
8 | We plan to provide this functionality in the future. At that time, you will need to follow [The Oracle Contributor Agreement](https://www.oracle.com/technetwork/community/oca-486395.html)
9 | (OCA).
10 |
11 | If you have ideas, comments, or issues related to Oracle DW-VLDB, please post an issue to the repository or a comment to https://blogs.oracle.com/optimizer.
12 |
--------------------------------------------------------------------------------
/LICENCE.md:
--------------------------------------------------------------------------------
1 | # Oracle dw-vldb
2 |
3 | You may not use the identified files except in compliance with the
4 | Universal Permissive License (UPL), Version 1.0 (the "License.")
5 |
6 | You may obtain a copy of the License at
7 | https://opensource.org/licenses/UPL. A copy of the license is
8 | also reproduced below.
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13 | implied.
14 |
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
18 |
19 | ```
20 | Copyright (c) 2014, 2016 Oracle and/or its affiliates
21 | The Universal Permissive License (UPL), Version 1.0
22 |
23 | Subject to the condition set forth below, permission is hereby granted to any person obtaining
24 | a copy of this software, associated documentation and/or data (collectively the "Software"),
25 | free of charge and under any and all copyright rights in the Software, and any and all patent
26 | rights owned or freely licensable by each licensor hereunder covering either (i) the unmodified
27 | Software as contributed to or provided by such licensor, or (ii) the Larger Works (as defined below),
28 | to deal in both
29 |
30 | (a) the Software, and (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
31 | one is included with the Software (each a Larger Work to which the Software is contributed by such licensors),
32 | without restriction, including without limitation the rights to copy, create derivative works of,
33 | display, perform, and distribute the Software and make, use, sell, offer for sale, import, export,
34 | have made, and have sold the Software and the Larger Work(s), and to sublicense the foregoing rights
35 | on either these or other terms.
36 |
37 | This license is subject to the following condition:
38 |
39 | The above copyright notice and either this complete permission notice or at a minimum a reference
40 | to the UPL must be included in all copies or substantial portions of the Software.
41 |
42 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
43 | LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
44 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
45 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
46 | THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
47 | ```
48 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | This is a top-level repository for code examples related to Data Warehousing and Very Large Databases.
3 |
4 | All the examples here have been copied to the new "oracle-db-examples" repository (the optimizer subdirectory). I won't be adding anything new to this repository, so please visit the new one.
5 |
--------------------------------------------------------------------------------
/att_clu_zone_map/01_setup12c.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 | set echo off
8 | connect / as sysdba
9 |
10 | REM To allow for re-execution of this script,
11 | REM the user is first dropped, then created.
12 | drop user aczm12c cascade;
13 |
14 | set echo on
15 | create user aczm12c identified by oracle_4U;
16 | alter user aczm12c default tablespace users;
17 | grant connect, dba to aczm12c;
18 |
--------------------------------------------------------------------------------
/att_clu_zone_map/02_table_create.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 |
8 | set timing off
9 | set echo off
10 | set lines 400 pages 1000
11 | set feedback 1
12 | set pause off
13 | set echo on
14 |
15 |
16 | PROMPT Connect to the Attribute Clusters/Zone Map Schema
17 | connect aczm12c/oracle_4U
18 |
19 | PROMPT Drop SALES_SOURCE table
20 | drop table sales_source
21 | /
22 | --
23 | PROMPT Create the SALES_SOURCE table
24 | PROMPT This will provide us with a consistent dataset
25 | PROMPT for any fact tables we choose to create later on
26 | --
27 | CREATE TABLE sales_source
28 | (
29 | order_id NUMBER(20) NOT NULL ,
30 | order_item_number NUMBER(3) NOT NULL ,
31 | sale_date DATE NOT NULL ,
32 | delivered DATE ,
33 | sale_agent VARCHAR2(100) NOT NULL ,
34 | product_id NUMBER(10) NOT NULL ,
35 | amount NUMBER(10,2) NOT NULL ,
36 | quantity NUMBER(5) NOT NULL ,
37 | location_id NUMBER(20) NOT NULL ,
38 | warehouse VARCHAR2(100) NOT NULL
39 | )
40 | /
41 |
42 | PROMPT Drop the LOCATIONS table
43 | DROP TABLE locations
44 | /
45 |
46 | PROMPT Create the LOCATIONS table
47 | CREATE TABLE locations
48 | (
49 | location_id NUMBER(20) ,
50 | state VARCHAR2(100) NOT NULL ,
51 | county VARCHAR2(100) NOT NULL ,
52 | description VARCHAR2(1000) NOT NULL ,
53 | PRIMARY KEY (location_id)
54 | )
55 | /
56 |
57 | PROMPT Drop the PRODUCTS table
58 | DROP TABLE products
59 | /
60 |
61 | PROMPT Create the PRODUCTS table
62 | CREATE TABLE products
63 | (
64 | product_id NUMBER(20) ,
65 | product_name VARCHAR2(20) ,
66 | product_description VARCHAR2(100) ,
67 | PRIMARY KEY(product_id)
68 | )
69 | /
70 |
71 |
72 |
73 |
74 |
--------------------------------------------------------------------------------
/att_clu_zone_map/04_source_fill.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 |
8 | set timing off
9 | set echo off
10 | set lines 400 pages 1000
11 | set feedback 1
12 | set pause off
13 | set echo on
14 |
15 | PROMPT Connect to the Attribute Clusters/Zone Map Schema
16 | connect aczm12c/oracle_4U
17 |
18 | --
19 | PROMPT Drop and create a sequence
20 | --
21 |
22 | DROP sequence nseq
23 | /
24 | CREATE sequence nseq CACHE 10000
25 | /
26 |
27 | --
28 | PROMPT Utility procedure to fill SALES_SOURCE table
29 | --
30 |
31 | CREATE OR REPLACE
32 | PROCEDURE filltab(
33 | p_start_date DATE,
34 | p_number_of_rows NUMBER,
35 | p_number_of_days NUMBER)
36 | AS
37 | TYPE sa_tab_type
38 | IS
39 | TABLE OF sales_source.sale_agent%TYPE INDEX BY BINARY_INTEGER;
40 | sa_tab sa_tab_type;
41 | TYPE wh_type
42 | IS
43 | TABLE OF sales_source.warehouse%TYPE INDEX BY BINARY_INTEGER;
44 | wh_tab wh_type;
45 | sale_date DATE;
46 | num_order_items NUMBER(2);
47 | sa sales_source.sale_agent%TYPE;
48 | product sales_source.product_id%TYPE;
49 | location sales_source.location_id%TYPE;
50 | wh sales_source.warehouse%TYPE;
51 | order_id sales_source.order_id%TYPE;
52 | num_products NUMBER(5);
53 | num_locations NUMBER(5);
54 | max_order_items NUMBER(3) := 20;
55 | num_inserted NUMBER(10) := 0;
56 | loop_count NUMBER(10) := 0;
57 | counter NUMBER(10);
58 | deliv_days NUMBER(3);
59 | BEGIN
60 | sa_tab(1) := 'MARK';
61 | sa_tab(2) := 'CLARE';
62 | sa_tab(3) := 'ANDREW';
63 | sa_tab(4) := 'LUCY';
64 | sa_tab(5) := 'JENNY';
65 | sa_tab(6) := 'JOHN';
66 | sa_tab(7) := 'BRIAN';
67 | sa_tab(8) := 'JANE';
68 | sa_tab(9) := 'ED';
69 | sa_tab(10) := 'SIMON';
70 | sa_tab(11) := 'SALLY';
71 | wh_tab(1) := 'ALBUQUERQUE';
72 | wh_tab(2) := 'WINSTON SALEM';
73 | wh_tab(3) := 'NEWPORT';
74 | wh_tab(4) := 'BIRMINGHAM';
75 | wh_tab(5) := 'OCOEE';
76 | wh_tab(6) := 'PRINCETON';
77 | order_id := nseq.nextval;
78 | sale_date := p_start_date;
79 | SELECT COUNT(*) INTO num_products FROM products;
80 | SELECT COUNT(*) INTO num_locations FROM locations;
81 | LOOP
82 | num_order_items:= dbms_random.value(1,max_order_items+1);
83 | order_id := nseq.nextval;
84 | sale_date := p_start_date + dbms_random.value(0,floor(p_number_of_days+1));
85 | wh := wh_tab(floor(dbms_random.value(1,7)));
86 | sa := sa_tab(floor(dbms_random.value(1,12)));
87 | deliv_days := dbms_random.value(2,30);
88 | INSERT INTO sales_source
89 | SELECT order_id ,
90 | rownum ,
91 | sale_date ,
92 | sale_date + deliv_days ,
93 | sa ,
94 | dbms_random.value(1,floor(num_products)) ,
95 | dbms_random.value(1,2000) ,
96 | dbms_random.value(1,3) ,
97 | dbms_random.value(1,floor(num_locations)) ,
98 | wh
99 | FROM dual
100 | CONNECT BY rownum <= num_order_items;
101 | num_inserted := num_inserted + num_order_items;
102 | loop_count := loop_count + 1;
103 | IF mod(loop_count,1000) = 0 THEN
104 | COMMIT;
105 | END IF;
106 | EXIT WHEN num_inserted >= p_number_of_rows;
107 | END LOOP;
108 | COMMIT;
109 | END;
110 | /
111 | show errors
112 |
113 | --
114 | PROMPT Fill the SALES_SOURCE table with data for 2000 and 2009
115 | PROMPT This may take several minutes...
116 | --
117 |
118 | EXECUTE filltab(to_date('01-JAN-2000','DD-MON-YYYY'),1452090,364);
119 | EXECUTE filltab(to_date('01-JAN-2009','DD-MON-YYYY'),500000,364);
120 |
121 | --
122 | PROMPT Gather table statistics...
123 | --
124 | EXECUTE dbms_stats.gather_table_stats(ownname=>NULL,tabname=>'sales_source');
125 |
126 |
--------------------------------------------------------------------------------
/att_clu_zone_map/05_create_fact.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 |
8 | set timing off
9 | set echo off
10 | set lines 400 pages 1000
11 | set feedback 1
12 | set pause off
13 | set echo on
14 | set autotrace off
15 | column plan_table_output format a150
16 | set trims on
17 |
18 | PROMPT Connect to the Attribute Clusters/Zone Map Schema
19 | connect aczm12c/oracle_4U
20 |
21 | PURGE recyclebin
22 | /
23 |
24 | PROMPT Drop SALES table (if it exists)
25 | DROP TABLE sales
26 | /
27 |
28 | PROMPT Drop SALES_AC table (if it exists)
29 | DROP TABLE sales_ac
30 | /
31 |
32 | --
33 | PROMPT Create the SALES fact table
34 | PROMPT This table will not have attribute clustering
35 | PROMPT or zone maps. We will use it to compare with
36 | PROMPT an attribute clustered table.
37 | --
38 | CREATE TABLE sales
39 | AS
40 | SELECT * FROM sales_source
41 | WHERE 1 = -1
42 | /
43 |
44 | --
45 | PROMPT Create a SALES_AC fact table
46 | PROMPT The data will be the same as SALES
47 | PROMPT but it will be used to demontrate
48 | PROMPT attribute clustering and zone maps
49 | PROMPT in comparison to the standard SALES table.
50 | --
51 | CREATE TABLE sales_ac
52 | AS
53 | SELECT * FROM sales_source
54 | WHERE 1 = -1
55 | /
56 |
57 | --
58 | PROMPT Here we enable linear ordered attribute clustering
59 | PROMPT We will simply order rows by location_id, product_id
60 | PROMPT To see the effects of attribute clustering in
61 | PROMPT isolation, we will not create a zone map.
62 | --
63 | ALTER TABLE sales_ac
64 | ADD CLUSTERING BY LINEAR ORDER (location_id, product_id)
65 | WITHOUT MATERIALIZED ZONEMAP
66 | /
67 |
68 | set timing on
69 | --
70 | PROMPT Insert data into standard table
71 | --
72 | INSERT /*+ APPEND */ INTO sales SELECT * FROM sales_source
73 | /
74 | --
75 | PROMPT Observe that insert plan is a simple insert
76 | --
77 | SELECT * FROM TABLE(dbms_xplan.display_cursor)
78 | /
79 | COMMIT
80 | /
81 |
82 | --
83 | PROMPT Insert data into attribute clustered table.
84 | PROMPT We must use a direct path operation to make
85 | PROMPT use of attribute clustering.
86 | PROMPT In real systems we will probably insert in
87 | PROMPT multiple batches: each batch of inserts will be
88 | PROMPT ordered appropriately. Later on,
89 | PROMPT if we want to re-order all rows into
90 | PROMPT tightly grouped zones we can, for example, use
91 | PROMPT partitioning and MOVE PARTITION to do this.
92 | PROMPT
93 | PROMPT Increased elapsed time is likely due
94 | PROMPT to the sort that is transparently performed to cluster
95 | PROMPT the data as it is inserted into the SALES_AC table.
96 | --
97 | INSERT /*+ APPEND */ INTO sales_ac SELECT * FROM sales_source
98 | /
99 | --
100 | PROMPT Observe the addition of "SORT ORDER BY" in the execution plan
101 | --
102 | SELECT * FROM TABLE(dbms_xplan.display_cursor)
103 | /
104 | COMMIT
105 | /
106 |
107 | set timing off
108 |
109 | PROMPT Gather table statistics
110 | EXECUTE dbms_stats.gather_table_stats(ownname=>NULL,tabname=>'sales');
111 | EXECUTE dbms_stats.gather_table_stats(ownname=>NULL,tabname=>'sales_ac');
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
--------------------------------------------------------------------------------
/att_clu_zone_map/06_ac_only.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 |
8 | set timing off
9 | set echo off
10 | set lines 400 pages 1000
11 | set feedback 1
12 | set pause off
13 | set echo on
14 |
15 | PROMPT Connect to the Attribute Clusters/Zone Map Schema
16 | connect aczm12c/oracle_4U
17 |
18 | PROMPT The full potential of attribute clusters are realised
19 | PROMPT when used in conjunction with zone maps, Exadata storage indexes
20 | PROMPT and In-Memory min/max pruning. However, they also improve
21 | PROMPT index clustering. This is demonstrated here.
22 |
23 | PROMPT Create indexes on location id for the standard SALES
24 | PROMPT table and the attribute clustered SALES_AC table
25 |
26 | CREATE INDEX sales_loc_i ON sales (location_id)
27 | /
28 |
29 | CREATE INDEX sales_ac_loc_i ON sales_ac (location_id)
30 | /
31 |
32 | column index_name format a40
33 |
34 | PROMPT Observe the improved value of "Average Blocks Per Key"
35 | PROMPT for the attribute clustered table. This will
36 | PROMPT result in fewer consistend gets for table lookups from
37 | PROMPT index range scans.
38 |
39 | SELECT index_name, clustering_factor,avg_data_blocks_per_key
40 | FROM user_indexes
41 | WHERE index_name LIKE 'SALES%LOC%'
42 | ORDER BY index_name
43 | /
44 |
45 | PROMPT Confirm that index range scans are occuring in both query examples
46 | PROMPT Hints are used in this case because the table is relatively small
47 | PROMPT so Exadata may choose a bloom filter plan.
48 |
49 | SELECT /*+ INDEX(sales sales_loc_i) */ SUM(amount)
50 | FROM sales
51 | JOIN locations ON (sales.location_id = locations.location_id)
52 | WHERE locations.state = 'California'
53 | AND locations.county = 'Alpine County'
54 | /
55 | SELECT * FROM TABLE(dbms_xplan.display_cursor);
56 |
57 | SELECT /*+ INDEX(sales_ac sales_ac_loc_i) */ SUM(amount)
58 | FROM sales_ac
59 | JOIN locations ON (sales_ac.location_id = locations.location_id)
60 | WHERE locations.state = 'California'
61 | AND locations.county = 'Alpine County'
62 | /
63 | SELECT * FROM TABLE(dbms_xplan.display_cursor);
64 |
65 | PROMPT Run two test queries to cache all relevant data
66 |
67 | SELECT SUM(amount)
68 | FROM sales
69 | JOIN locations ON (sales.location_id = locations.location_id)
70 | WHERE locations.state = 'California'
71 | AND locations.county = 'Alpine County'
72 | /
73 |
74 | SELECT SUM(amount)
75 | FROM sales_ac
76 | JOIN locations ON (sales_ac.location_id = locations.location_id)
77 | WHERE locations.state = 'California'
78 | AND locations.county = 'Alpine County'
79 | /
80 |
81 | PROMPT Run queries again and observe
82 | PROMPT the reduced number of consistent
83 | PROMPT gets for the attribute cluster example.
84 |
85 | SET AUTOTRACE ON STATISTICS
86 |
87 | SELECT SUM(amount)
88 | FROM sales
89 | JOIN locations ON (sales.location_id = locations.location_id)
90 | WHERE locations.state = 'California'
91 | AND locations.county = 'Alpine County'
92 | /
93 |
94 | SELECT SUM(amount)
95 | FROM sales_ac
96 | JOIN locations ON (sales_ac.location_id = locations.location_id)
97 | WHERE locations.state = 'California'
98 | AND locations.county = 'Alpine County'
99 | /
100 |
101 | SET AUTOTRACE OFF
102 |
103 | PROMPT Drop the test indexes
104 |
105 | DROP INDEX sales_loc_i
106 | /
107 |
108 | DROP INDEX sales_ac_loc_i
109 | /
110 |
111 |
112 |
--------------------------------------------------------------------------------
/att_clu_zone_map/08_zm_prune.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 |
8 | set timing off
9 | set echo off
10 | set lines 400 pages 1000
11 | set feedback 1
12 | set pause off
13 | set echo on
14 | column plan_table_output format a150
15 | set trims on
16 |
17 | PROMPT Connect to the Attribute Clusters/Zone Map Schema
18 | connect aczm12c/oracle_4U
19 |
20 | PROMPT Clean up
21 |
22 | ALTER TABLE sales_ac DROP CLUSTERING
23 | /
24 | DROP MATERIALIZED ZONEMAP sales_ac_zmap
25 | /
26 | DROP MATERIALIZED ZONEMAP zmap$_sales_ac
27 | /
28 |
29 | PROMPT Cluster the table again
30 |
31 | ALTER TABLE sales_ac
32 | ADD CLUSTERING sales_ac
33 | JOIN locations ON (sales_ac.location_id = locations.location_id)
34 | JOIN products ON (sales_ac.product_id = products.product_id)
35 | BY INTERLEAVED ORDER ((locations.state, locations.county), products.product_name, sales_ac.location_id)
36 | WITHOUT MATERIALIZED ZONEMAP
37 | /
38 |
39 | PROMPT Since we have changed the clustering columns, we need to
40 | PROMPT re-organize the table. This can be achieved using a move operation.
41 |
42 | ALTER TABLE sales_ac MOVE
43 | /
44 |
45 | PROMPT
46 | PROMPT Manually create the zone map.
47 | PROMPT
48 |
49 | CREATE MATERIALIZED ZONEMAP sales_ac_zmap
50 | AS
51 | SELECT SYS_OP_ZONE_ID(s.rowid),
52 | MIN(l.state) min_state,
53 | MAX(l.state) max_state,
54 | MIN(l.county) min_county,
55 | MAX(l.county) max_county,
56 | MIN(p.product_name) min_prod,
57 | MAX(p.product_name) max_prod,
58 | MIN(s.location_id) min_loc,
59 | MAX(s.location_id) max_loc
60 | FROM sales_ac s,
61 | locations l,
62 | products p
63 | WHERE s.location_id = l.location_id(+)
64 | AND s.product_id = p.product_id(+)
65 | GROUP BY SYS_OP_ZONE_ID(s.rowid)
66 | /
67 |
68 | PROMPT Observe that we are achieving reduced IO for
69 | PROMPT compared agains the non-zone mapped table (sales)
70 |
71 | SET AUTOTRACE ON STATISTICS
72 |
73 | PROMPT Conventional
74 |
75 | SELECT SUM(amount)
76 | FROM sales
77 | JOIN locations ON (sales.location_id = locations.location_id)
78 | WHERE locations.state = 'California'
79 | AND locations.county = 'Alpine County'
80 | /
81 |
82 | PROMPT With zone map
83 |
84 | SELECT SUM(amount)
85 | FROM sales_ac
86 | JOIN locations ON (sales_ac.location_id = locations.location_id)
87 | WHERE locations.state = 'California'
88 | AND locations.county = 'Alpine County'
89 | /
90 |
91 | PROMPT Conventional
92 |
93 | SELECT SUM(amount)
94 | FROM sales
95 | WHERE location_id = 1000
96 | /
97 |
98 | PROMPT With zone map
99 |
100 | SELECT SUM(amount)
101 | FROM sales_ac
102 | WHERE location_id = 1000
103 | /
104 |
105 | SET AUTOTRACE OFF
106 |
107 | --
108 | PROMPT Scan and join pruning
109 | --
110 | SELECT SUM(amount)
111 | FROM sales_ac
112 | JOIN locations ON (sales_ac.location_id = locations.location_id)
113 | WHERE locations.state = 'California'
114 | AND locations.county = 'Alpine County'
115 | /
116 |
117 | SELECT * FROM TABLE(dbms_xplan.display_cursor);
118 |
119 | PROMPT Create an index on SALES_AC LOCATION_ID
120 |
121 | CREATE INDEX sales_ac_loc_i on sales_ac(location_id)
122 | /
123 | CREATE INDEX sales_loc_i on sales(location_id)
124 | /
125 |
126 | --
127 | PROMPT Index rowids can be pruned by zone
128 | --
129 | SELECT sum(amount)
130 | FROM sales_ac
131 | WHERE location_id = 1000
132 | AND order_item_number = 1
133 | /
134 |
135 | SELECT * FROM TABLE(dbms_xplan.display_cursor);
136 |
137 | DROP INDEX sales_ac_loc_i
138 | /
139 | DROP INDEX sales_loc_i
140 | /
141 |
142 |
143 |
144 |
--------------------------------------------------------------------------------
/att_clu_zone_map/09_part_zm.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 |
8 | set timing off
9 | set echo off
10 | set lines 400 pages 1000
11 | set feedback 1
12 | set pause off
13 | set echo on
14 | set autotrace off
15 | column plan_table_output format a150
16 | set trims on
17 |
18 | PROMPT Connect to the Attribute Clusters/Zone Map Schema
19 | connect aczm12c/oracle_4U
20 |
21 | --
22 | PROMPT Drop the partitioned sales table (SALES_P) if it exists
23 | --
24 | DROP TABLE sales_p
25 | /
26 |
27 | --
28 | PROMPT Create a range partitioned sales_p table
29 | PROMPT with join attribute clustering and a zone map
30 | --
31 | CREATE TABLE sales_p (
32 | order_id number(20) not null
33 | , order_item_number number(3) not null
34 | , sale_date date not null
35 | , delivered date
36 | , sale_agent varchar2(100) not null
37 | , product_id number(10) not null
38 | , amount number(10,2) not null
39 | , quantity number(5) not null
40 | , location_id number(20) not null
41 | , warehouse varchar2(100) not null
42 | )
43 | CLUSTERING sales_p
44 | JOIN locations ON (sales_p.location_id = locations.location_id)
45 | JOIN products ON (sales_p.product_id = products.product_id)
46 | BY INTERLEAVED ORDER ((locations.state, locations.county),products.product_name, sales_p.delivered)
47 | WITH MATERIALIZED ZONEMAP
48 | PARTITION BY RANGE(sale_date) (
49 | PARTITION p1 VALUES LESS THAN (to_date('2005-01-01','YYYY-MM-DD'))
50 | ,PARTITION p2 VALUES LESS THAN (to_date('2010-01-01','YYYY-MM-DD'))
51 | )
52 | /
53 |
54 | --
55 | PROMPT Fill SALES_P with data
56 | --
57 | INSERT /*+ APPEND */ INTO sales_p SELECT * FROM sales_source
58 | /
59 | COMMIT
60 | /
61 |
62 | --
63 | PROMPT Gather statistics on the table
64 | --
65 | EXECUTE dbms_stats.gather_table_stats(ownname=>NULL,tabname=>'sales_p');
66 |
67 | --
68 | PROMPT Confirm that the query plan includes a zone map filter
69 | --
70 | SELECT SUM(amount)
71 | FROM sales_p
72 | JOIN locations ON (sales_p.location_id = locations.location_id)
73 | WHERE locations.state = 'California'
74 | AND locations.county = 'Alpine County'
75 | /
76 |
77 | SELECT * FROM TABLE(dbms_xplan.display_cursor);
78 |
79 | column zone_id$ format 99999999999999
80 |
81 | --
82 | PROMPT Observe the zone id and the min and max delivered date
83 | PROMPT for each zone. The zone map state for each zone
84 | PROMPT will be "0", which equates to "valid".
85 | PROMPT Zone level "1" represents partitions and "0" represents zones.
86 | PROMPT "Delivered" date correlates well with the partition key: "Sale Date".
87 | PROMPT This is because we can expect a delivery to occur soon after a sale.
88 | PROMPT So, because the delivered date correlates well with the partition key,
89 | PROMPT each partition will contain a subset of "delivered" values.
90 | PROMPT We should expect to be able to prune partitions from
91 | PROMPT queries that filter on the "delivered" date.
92 |
93 | SELECT zone_id$ ,
94 | min_4_delivered ,
95 | max_4_delivered ,
96 | zone_level$,
97 | zone_state$ ,
98 | zone_rows$
99 | FROM ZMAP$_SALES_P;
100 |
101 | PROMPT Observe that Pstart/Pstop shows KEY(ZM),
102 | PROMPT indicating the potential to prune partitions
103 |
104 | SELECT SUM(amount)
105 | FROM sales_p
106 | WHERE delivered between TO_DATE('18-SEP-00', 'DD-MON-YY') and TO_DATE('19-SEP-00', 'DD-MON-YY')
107 | /
108 |
109 | SELECT * FROM TABLE(dbms_xplan.display_cursor);
110 |
111 | PROMPT Observe the effects of IO pruning
112 | PROMPT Exadata storage indexes may affect the
113 | PROMPT actual number of blocks read from storage cells.
114 | PROMPT However, using zone maps will ensure that pruning can
115 | PROMPT occur in all appropriate circumstances.
116 |
117 | SET AUTOTRACE ON STATISTICS
118 |
119 | PROMPT Conventional table
120 |
121 | SELECT SUM(amount)
122 | FROM sales
123 | WHERE delivered between TO_DATE('18-SEP-00', 'DD-MON-YY') and TO_DATE('19-SEP-00', 'DD-MON-YY')
124 | /
125 |
126 | PROMPT With zone map
127 |
128 | SELECT SUM(amount)
129 | FROM sales_p
130 | WHERE delivered between TO_DATE('18-SEP-00', 'DD-MON-YY') and TO_DATE('19-SEP-00', 'DD-MON-YY')
131 | /
132 |
133 | SET AUTOTRACE OFF
134 |
135 |
--------------------------------------------------------------------------------
/att_clu_zone_map/10_zm_maint.sql:
--------------------------------------------------------------------------------
1 | -- DISCLAIMER:
2 | -- This script is provided for educational purposes only. It is
3 | -- NOT supported by Oracle World Wide Technical Support.
4 | -- The script has been tested and appears to work as intended.
5 | -- You should always run new scripts initially
6 | -- on a test instance.
7 |
8 | set timing off
9 | set echo off
10 | set lines 400 pages 1000
11 | set feedback 1
12 | set pause off
13 | set echo on
14 | set autotrace off
15 | column plan_table_output format a150
16 | set trims on
17 |
18 | PROMPT Connect to the Attribute Clusters/Zone Map Schema
19 | connect aczm12c/oracle_4U
20 |
21 | --
22 | PROMPT Delete operations do not invalidate zones or partitions
23 | PROMPT because they will not invalidate MIN/MAX value ranges.
24 |
25 | --
26 | DELETE FROM sales_p WHERE order_id = 10
27 | /
28 | COMMIT
29 | /
30 |
31 | PROMPT Observe that the state remains "0" for all zones and partitions
32 |
33 | SELECT zone_id$ ,
34 | zone_level$,
35 | zone_state$ ,
36 | zone_rows$
37 | FROM ZMAP$_SALES_P;
38 |
39 | PROMPT The zone map is not made stale...
40 |
41 | SELECT stale
42 | FROM user_zonemaps
43 | WHERE zonemap_name = 'ZMAP$_SALES_P'
44 | /
45 |
46 | --
47 | PROMPT Conventional path insert will invalidate
48 | PROMPT relevant zones and partitions unless
49 | PROMPT the zone map is set to refresh on commit.
50 | --
51 | INSERT INTO sales_p
52 | SELECT 10,1,TO_DATE('01-JAN-2000','DD-MON-YYYY'),TO_DATE('02-JAN-2000','DD-MON-YYYY'),'JANE',23,20,2,67,'WINSTON SALEM'
53 | FROM dual
54 | /
55 | COMMIT
56 | /
57 |
58 | PROMPT Individual zones are now invalidated...
59 |
60 | SELECT zone_id$ ,
61 | zone_level$,
62 | zone_state$ ,
63 | zone_rows$
64 | FROM ZMAP$_SALES_P;
65 |
66 | PROMPT But the zone map is not stale...
67 |
68 | SELECT stale
69 | FROM user_zonemaps
70 | WHERE zonemap_name = 'ZMAP$_SALES_P'
71 | /
72 |
73 | PROMPT If the zone map itself is not stale,
74 | PROMPT a fast refresh is possible.
75 | PROMPT Only stale zones are scanned to refresh
76 | PROMPT the zone map.
77 |
78 | EXECUTE dbms_mview.refresh('ZMAP$_SALES_P', 'f');
79 |
80 | PROMPT The zones are valid (0) again...
81 |
82 | SELECT zone_id$ ,
83 | zone_level$,
84 | zone_state$ ,
85 | zone_rows$
86 | FROM ZMAP$_SALES_P;
87 |
88 | PROMPT Remove the "test" row. Zones will remain valid.
89 |
90 | DELETE FROM sales_p WHERE order_id = 10
91 | /
92 | COMMIT
93 | /
94 |
95 | PROMPT Direct path operations will maintain the zone map
96 |
97 | INSERT /*+ APPEND */ INTO sales_p
98 | SELECT 10,1,TO_DATE('01-JAN-2000','DD-MON-YYYY'),TO_DATE('02-JAN-2000','DD-MON-YYYY'),'JANE',23,20,2,67,'WINSTON SALEM'
99 | FROM dual
100 | /
101 | COMMIT
102 | /
103 |
104 | PROMPT All zones still valid...
105 |
106 | SELECT zone_id$ ,
107 | zone_level$,
108 | zone_state$ ,
109 | zone_rows$
110 | FROM ZMAP$_SALES_P;
111 |
112 | --
113 | PROMPT Updates to non-zone map columns (and columns not
114 | PROMPT used to join with dimension tables)
115 | PROMPT do not invalidate the zones or partitions
116 | PROMPT (unless there is row movement when a
117 | PROMPT partition key is updated)
118 | --
119 | UPDATE sales_p SET amount = amount + 100
120 | WHERE location_id < 20
121 | /
122 | COMMIT
123 | /
124 |
125 | PROMPT All zones are still valid...
126 |
127 | SELECT zone_id$ ,
128 | zone_level$,
129 | zone_state$ ,
130 | zone_rows$
131 | FROM ZMAP$_SALES_P;
132 |
133 | PROMPT Remove the "test" row. Zones will remain valid.
134 |
135 | DELETE FROM sales_p WHERE order_id = 10
136 | /
137 | COMMIT
138 | /
139 |
140 | PROMPT A conventional path insert will invalidate zones...
141 |
142 | INSERT INTO sales_p
143 | SELECT 10,1,TO_DATE('01-JAN-2000','DD-MON-YYYY'),TO_DATE('02-JAN-2000','DD-MON-YYYY'),'JANE',23,20,2,67,'WINSTON SALEM'
144 | FROM dual
145 | /
146 | COMMIT
147 | /
148 |
149 | PROMPT Note invalid zones (marked with "1")...
150 |
151 | SELECT zone_id$ ,
152 | zone_level$,
153 | zone_state$ ,
154 | zone_rows$
155 | FROM ZMAP$_SALES_P;
156 |
157 | PROMPT Even if some zones are stale,
158 | PROMPT queries will continue to use the zone map where possible.
159 | PROMPT The primary effect of zones being marked as stale
160 | PROMPT is that these zones cannot be skipped: stale zones
161 | PROMPT and partitions will always be scanned.
162 |
163 | SELECT SUM(amount)
164 | FROM sales_p
165 | WHERE delivered between TO_DATE('18-SEP-2000', 'DD-MON-YYYY') and TO_DATE('19-SEP-2000', 'DD-MON-YYYY')
166 | /
167 |
168 | SELECT * FROM TABLE(dbms_xplan.display_cursor);
169 |
170 | PROMPT Data movement can maintain zone maps
171 | PROMPT and attribute clusters.
172 |
173 | ALTER TABLE sales_p MOVE PARTITION p1
174 | /
175 |
176 | PROMPT All zones are valid (0)...
177 |
178 | SELECT zone_id$ ,
179 | zone_level$,
180 | zone_state$ ,
181 | zone_rows$
182 | FROM ZMAP$_SALES_P;
183 |
184 | PROMPT Remove the "test" row. Zones will remain valid.
185 |
186 | DELETE FROM sales_p WHERE order_id = 10
187 | /
188 | COMMIT
189 | /
190 |
--------------------------------------------------------------------------------
/att_clu_zone_map/README.md:
--------------------------------------------------------------------------------
1 | Zone Map and Attribute Clustering Examples
2 |
3 | Purpose
4 |
5 |
6 | This tutorial covers Oracle Database 12c attribute clusters and zone maps, features designed to offer significant IO reduction for queries that would otherwise perform full table scans.
7 |
8 | Time to Complete
9 | Approximately 45 minutes.
10 |
11 | Introduction
12 |
13 | Attribute clustering is a table-level directive that clusters data in close physical proximity based on the content of certain columns. Storing data that logically belongs together in close physical proximity can greatly reduce the amount of data to be processed and can lead to better performance of certain queries in the workload.
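
For example, the clustering directive used later in this tutorial for the SALES_AC table takes this form (a condensed sketch of the statement in 05_create_fact.sql):

    ALTER TABLE sales_ac
      ADD CLUSTERING BY LINEAR ORDER (location_id, product_id)
      WITHOUT MATERIALIZED ZONEMAP;

Subsequent direct path inserts into SALES_AC transparently sort rows by LOCATION_ID and PRODUCT_ID, so rows with similar values land in the same blocks.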
14 |
15 | A zone map is an independent access structure that can be built for a table. During table and index scans, zone maps enable you to prune disk blocks of a table (and potentially full partitions of a partitioned table) based on predicates on table columns. Zone maps do this by maintaining a list of minimum and maximum column values for each zone (or range) of blocks in a table, and they do the same for partitions and sub-partitions. If columns with common attributes are clustered together, it becomes possible to minimize the number of zones that need to be scanned in order to find a particular predicate match. For this reason, the effectiveness of zone maps is improved if rows are clustered together using attribute clustering or if they are manually sorted on load (using, for example, an ETL process that includes a sort). Zone maps can be used with or without attribute clustering.
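
For reference, the simplest form of the statement builds a zone map directly on table columns (a minimal sketch; the tutorial's SALES table and LOCATION_ID column are used for illustration):

    CREATE MATERIALIZED ZONEMAP sales_zmap
      ON sales (location_id);

Script 08_zm_prune.sql shows the long-form CREATE MATERIALIZED ZONEMAP ... AS SELECT syntax, which is needed when the zone map includes joined dimension columns.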
16 |
17 | In contrast to traditional clustering methods, attribute clusters have the capability to cluster data in fact tables based on dimension table attribute values. This has wide application, but it is particularly useful in Data Warehousing star schema environments. It is possible to significantly reduce the number of fact table blocks that need to be scanned during joins that filter on dimension attribute values, including dimension attribute value hierarchies. Zone maps can be used as an alternative to bitmap indexes.
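
The dimension-based form of the clustering directive looks like this (a sketch condensed from the statements in 08_zm_prune.sql and 09_part_zm.sql):

    ALTER TABLE sales_ac
      ADD CLUSTERING sales_ac
      JOIN locations ON (sales_ac.location_id = locations.location_id)
      BY INTERLEAVED ORDER (locations.state, locations.county)
      WITHOUT MATERIALIZED ZONEMAP;

Here fact rows are clustered by dimension attribute values (state and county) even though those columns are not stored in the fact table itself.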
18 |
19 | Hardware and Software Requirements
20 |
21 | - Oracle Database 12c
22 | - Zone maps require Oracle Exadata
23 |
24 | Prerequisites
25 |
26 | - Have access to Oracle Database 12c with a sample ORCL database, the SYS user with SYSDBA privilege and OS authentication (so that you can execute the sqlplus / as sysdba command).
27 | - This example uses the USERS tablespace, included in Oracle Database 12c. To demonstrate these features adequately, reasonably large tables are needed, so approximately 1GB of free space is required in the USERS tablespace.
28 | - Have downloaded and unzipped the 12c_aczm.zip file (which is in the files subdirectory of this tutorial) into a working directory.
29 | - Navigate to your working directory and execute all files from that location.
30 | - Execute the tutorial setup which creates the aczm12c user.
31 |
32 |
33 | Assumptions
34 |
35 | - This tutorial assumes that when you begin executing steps for a topic, you complete the entire topic before going to another one.
36 | - You can also re-execute the tutorial setup from step number one (01_setup12c.sql): it will drop the aczm12c user and all its objects and then recreate it.
37 |
38 | Tutorial Overview
39 |
40 | Note that the generated data used in the tutorial is pseudo-random, so your query results will not match the example output exactly. A 16K block size was used when creating the sample output, so the database statistic values you see will differ in proportion to the block size you are using.
41 |
42 | - Create a SALES_SOURCE fact table (this is used as a source of data for the fact tables created later in this tutorial).
43 | - Create two dimension tables: PRODUCTS and LOCATIONS.
44 | - Create a join attribute clustered table called SALES_AC, clustering it using product and location attribute values.
45 | - For comparative purposes, create a non-attribute clustered table called SALES.
46 | - Examine the behavior of index range scans on the attribute clustered fact table without using zone maps.
47 | - Remove fact table indexes and observe IO pruning using zone maps.
48 | - Create a partitioned table, SALES_P, and observe zone and partition pruning (for example, using predicates on columns that are not included in the partition key).
49 | - Invalidating and refreshing zone maps.
50 |
51 | Setup
52 |
53 | Set up the tutorial by running the following scripts. These scripts do not form part of the tutorial itself; they create the database user and the source table data.
54 |
55 | - 01_setup12c.sql
56 | - 02_table_create.sql
57 | - 03_dim_fill.sql
58 | - 04_source_fill.sql
59 |
60 | Attribute Clustering
61 |
62 | Here, we will create a couple of fact tables. SALES will not have attribute clustering or a zone map. It will be used to compare against SALES_AC, which will have attribute clustering and/or zone maps.
63 |
64 | - 05_create_fact.sql
65 |
66 | Attribute clusters can be used without zone maps. By themselves, they provide no scan IO pruning (other than via Exadata storage indexes). However, index range scans can benefit from improved performance where index columns match attribute cluster columns.
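
The improvement is visible in the optimizer statistics for the indexes; 06_ac_only.sql compares the two tables with this query:

    SELECT index_name, clustering_factor, avg_data_blocks_per_key
    FROM   user_indexes
    WHERE  index_name LIKE 'SALES%LOC%'
    ORDER  BY index_name;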
67 |
68 | In many cases, attribute clusters bring common data values together and make them local to one another. This can benefit compression ratios for row-based compression in particular.
69 |
70 | - 06_ac_only.sql
71 |
72 | Linear and Interleaved Zone Maps
73 |
74 | An Oracle Exadata environment is required from this point onwards: zone maps are an Exadata-only feature.
75 |
76 | - 07_zm_lin_inter.sql
77 |
78 |
79 | Join and Index Pruning
80 |
81 | - 08_zm_prune.sql
82 |
83 | Zone Maps on Partitioned Tables
84 |
85 | Zone maps keep partition-level information as well as zone-level information. This makes it possible to partition eliminate on columns that are not included in partition keys or even on dimension attribute values. The likelihood of partition elimination is dependent on the level of correlation between zone map column values and the values in the partition key column (or columns).
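
For example, 09_part_zm.sql partitions SALES_P by SALE_DATE and then filters on DELIVERED, a column that correlates with the partition key but is not part of it:

    SELECT SUM(amount)
    FROM   sales_p
    WHERE  delivered BETWEEN TO_DATE('18-SEP-00', 'DD-MON-YY')
                         AND TO_DATE('19-SEP-00', 'DD-MON-YY');

The execution plan shows KEY(ZM) in the Pstart/Pstop columns, indicating that the zone map can eliminate partitions even though DELIVERED is not the partition key.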
86 |
87 | - 09_part_zm.sql
88 |
89 | Zone Map Maintenance
90 |
91 | Certain operations will invalidate zones. Maintenance operations are required to refresh zone maps.
92 |
93 | Zone maps containing some stale zones can still be used by queries. Query data will continue to be returned as expected.
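
If the zone map itself is not stale, a fast refresh re-scans only the stale zones; this is the call used in 10_zm_maint.sql:

    EXECUTE dbms_mview.refresh('ZMAP$_SALES_P', 'f');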
94 |
95 | - 10_zm_maint.sql
96 |
97 |
--------------------------------------------------------------------------------
/bulk_load/01_dirs.sql:
--------------------------------------------------------------------------------
1 | --
2 | --
3 | -- This script points to directories on the file system.
4 | -- You can modify it to point where you like, but
5 | -- make sure that the physical directory exists on the
6 | -- file system and that it is readable by the "oracle"
7 | -- operating system user.
8 | --
9 | --
10 | CREATE OR REPLACE DIRECTORY data_dir AS '/home/oracle/direct';
11 | CREATE OR REPLACE DIRECTORY bad_dir AS '/home/oracle/direct';
12 | CREATE OR REPLACE DIRECTORY log_dir AS '/home/oracle/direct';
13 |
--------------------------------------------------------------------------------
/bulk_load/02_table.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- This creates the SALES staging table.
3 | --
4 | DROP TABLE sales_stage;
5 |
6 | CREATE TABLE sales_stage
7 | (id NUMBER(10)
8 | ,txt VARCHAR2(100)
9 | );
10 |
11 |
--------------------------------------------------------------------------------
/bulk_load/03_etable.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- This script creates an external table.
3 | -- The "dirs" script must be run first because
4 | -- the directories are referenced.
5 | --
6 | DROP TABLE sales_ext
7 | /
8 |
9 | --
10 | -- I like to specify the character set explicitly
11 | -- because the external table would otherwise
12 | -- inherit the character set of the database and use
13 | -- this when reading the datafiles. I prefer a fixed
14 | -- width character set as it enables inter-file
15 | -- parallelism in both 11g and 12c.
16 | --
17 | CREATE TABLE sales_ext (
18 | id NUMBER
19 | , txt CHAR(100))
20 | ORGANIZATION EXTERNAL
21 | ( TYPE ORACLE_LOADER
22 | DEFAULT DIRECTORY data_dir
23 | ACCESS PARAMETERS
24 | ( RECORDS DELIMITED BY NEWLINE
25 | CHARACTERSET US7ASCII
26 | BADFILE bad_dir: 'sales%a_%p.bad'
27 | LOGFILE log_dir: 'sales%a_%p.log'
28 | FIELDS TERMINATED BY '|'
29 | MISSING FIELD VALUES ARE NULL )
30 | LOCATION ( data_dir:'sales_1.dat',
31 | data_dir:'sales_2.dat' ))
32 | PARALLEL
33 | REJECT LIMIT 10;
34 |
--------------------------------------------------------------------------------
/bulk_load/04_etablez.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- This script creates an external table with a
3 | -- preprocessor script called "unc.sh". In this case
4 | -- the pre-processor will gunzip the data files
5 | -- on-the-fly as they are loaded.
6 | -- The "dirs" script must be run first because
7 | -- the directories are referenced.
8 | --
9 | DROP TABLE salesz_ext
10 | /
11 |
12 | CREATE TABLE salesz_ext (
13 | id NUMBER
14 | , txt CHAR(100))
15 | ORGANIZATION EXTERNAL
16 | ( TYPE ORACLE_LOADER
17 | DEFAULT DIRECTORY data_dir
18 | ACCESS PARAMETERS
19 | ( RECORDS DELIMITED BY NEWLINE
20 | PREPROCESSOR data_dir:'unc.sh'
21 | CHARACTERSET US7ASCII
22 | BADFILE bad_dir: 'salesz%a_%p.bad'
23 | LOGFILE log_dir: 'salesz%a_%p.log'
24 | FIELDS TERMINATED BY '|'
25 | MISSING FIELD VALUES ARE NULL )
26 | LOCATION ( data_dir:'sales_1.dat.gz',
27 | data_dir:'sales_2.dat.gz' ))
28 | PARALLEL
29 | REJECT LIMIT 10;
30 |
--------------------------------------------------------------------------------
/bulk_load/05_makedat.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # The DIR directory below should be the same as
4 | # the one referenced in the 01_dirs.sql script.
5 | #
6 | # This script creates four example data files and
7 | # the "unc.sh" script.
8 | # If you want to do some more serious volume tests,
9 | # you can make the ROWCOUNT larger but it could
10 | # take some time to complete!
11 | #
12 | DIR=/home/oracle/direct
13 | ROWCOUNT=1000
14 |
15 | if [ ! -d ${DIR} ]
16 | then
17 | echo "ERROR:"
18 | echo "Before running this script you will need to create"
19 | echo "an empty directory called ${DIR}. "
20 | echo "If you want to use a different directory, then edit"
21 | echo "this script to change its location and also change"
22 | echo "the name of the directory in 01_dirs.sql and re-run"
23 | echo "that SQL script too."
24 | exit 2
25 | fi
26 |
27 | FILE1=${DIR}/sales_1.dat
28 | FILE2=${DIR}/sales_2.dat
29 | FILE1Z=${DIR}/sales_1.dat.gz
30 | FILE2Z=${DIR}/sales_2.dat.gz
31 |
32 | rm -f ${FILE1} 2>/dev/null
33 | rm -f ${FILE2} 2>/dev/null
34 | rm -f ${FILE1Z} 2>/dev/null
35 | rm -f ${FILE2Z} 2>/dev/null
36 |
37 | echo "Creating DAT file 1..."
38 |
39 | # Note: use seq here because bash brace expansion does not expand variables
40 | for i in $(seq 1 ${ROWCOUNT})
40 | do
41 | echo "${i}|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" >> ${FILE1}
42 | done
43 |
44 | echo "Creating DAT file 2..."
45 |
46 | cp ${FILE1} ${FILE2}
47 |
48 | echo "Creating GZIP files..."
49 |
50 | cd ${DIR}
51 | gzip<${FILE1}>${FILE1Z}
52 | gzip<${FILE2}>${FILE2Z}
53 |
54 | echo "Creating unc.sh script..."
55 | echo "/bin/gunzip -c \$1" > ${DIR}/unc.sh
56 | chmod +x ${DIR}/unc.sh
57 |
--------------------------------------------------------------------------------
/bulk_load/06_ins.lst:
--------------------------------------------------------------------------------
1 | SQL>
2 | SQL> --
3 | SQL> -- This will ensure that we are not using AutoDOP
4 | SQL> -- Auto DOP is not a "problem", but using manual
5 | SQL> -- DOP will mean that the script will work
6 | SQL> -- as intended in this test case.
7 | SQL> --
8 | SQL> alter session set parallel_degree_policy = 'MANUAL';
9 |
10 | Session altered.
11 |
12 | Elapsed: 00:00:00.00
13 | SQL> --
14 | SQL> -- Enable parallel DML so that the write into the
15 | SQL> -- staging table will be in parallel for maximum
16 | SQL> -- performance.
17 | SQL> --
18 | SQL> alter session enable parallel dml;
19 |
20 | Session altered.
21 |
22 | Elapsed: 00:00:00.00
23 | SQL>
24 | SQL> --
25 | SQL> -- Read the data files via the external table
26 | SQL> -- and insert the rows into the staging table.
27 | SQL> --
28 | SQL> INSERT /*+ APPEND PARALLEL(itab,2) */ INTO sales_stage itab
29 | 2 SELECT /*+ PARALLEL(tab,2) */ *
30 | 3 FROM sales_ext tab
31 | 4 /
32 |
33 | 2000 rows created.
34 |
35 | Elapsed: 00:00:00.08
36 | SQL>
37 | SQL> SELECT *
38 | 2 FROM table(dbms_xplan.display_cursor);
39 |
40 | PLAN_TABLE_OUTPUT
41 | ------------------------------------------------------------------------------------------------------------------------------------------------------
42 | SQL_ID 07pb04qn3ug31, child number 0
43 | -------------------------------------
44 | INSERT /*+ APPEND PARALLEL(itab,2) */ INTO sales_stage itab SELECT /*+
45 | PARALLEL(tab,2) */ * FROM sales_ext tab
46 |
47 | Plan hash value: 2731173783
48 |
49 | -----------------------------------------------------------------------------------------------------------------------------
50 | | Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time | TQ |IN-OUT| PQ Distrib |
51 | -----------------------------------------------------------------------------------------------------------------------------
52 | | 0 | INSERT STATEMENT | | | | 393 (100)| | | | |
53 | | 1 | PX COORDINATOR | | | | | | | | |
54 | | 2 | PX SEND QC (RANDOM) | :TQ10000 | 204K| 22M| 393 (5)| 00:00:01 | Q1,00 | P->S | QC (RAND) |
55 | | 3 | LOAD AS SELECT (HYBRID TSM/HWMB)| | | | | | Q1,00 | PCWP | |
56 | | 4 | PX BLOCK ITERATOR | | 204K| 22M| 393 (5)| 00:00:01 | Q1,00 | PCWC | |
57 | | 5 | EXTERNAL TABLE ACCESS FULL | SALES_EXT | 204K| 22M| 393 (5)| 00:00:01 | Q1,00 | PCWP | |
58 | -----------------------------------------------------------------------------------------------------------------------------
59 |
60 |
61 | 18 rows selected.
62 |
63 | Elapsed: 00:00:00.03
64 | SQL>
65 | SQL> --
66 | SQL> -- Expect to see an error here!
67 | SQL> -- This is because you will need to
68 | SQL> -- commit the data before it is read.
69 | SQL> --
70 | SQL> SELECT count(*)
71 | 2 FROM sales_stage
72 | 3 /
73 | FROM sales_stage
74 | *
75 | ERROR at line 2:
76 | ORA-12838: cannot read/modify an object after modifying it in parallel
77 |
78 |
79 | Elapsed: 00:00:00.01
80 | SQL>
81 | SQL> --
82 | SQL> -- Commit the transaction
83 | SQL> --
84 | SQL> COMMIT;
85 |
86 | Commit complete.
87 |
88 | Elapsed: 00:00:00.01
89 | SQL>
90 | SQL> --
91 | SQL> -- Now you can read the loaded data
92 | SQL> --
93 | SQL> SELECT count(*)
94 | 2 FROM sales_stage
95 | 3 /
96 |
97 | COUNT(*)
98 | ----------
99 | 2000
100 |
101 | Elapsed: 00:00:00.00
102 | SQL>
103 | SQL> spool off
104 |
--------------------------------------------------------------------------------
/bulk_load/06_ins.sql:
--------------------------------------------------------------------------------
1 | set timing on
2 | set echo on
3 | set linesize 300
4 | column table_name format a30
5 | column segment_name format a30
6 | column plan_table_output format a150
7 | column partition_name format a30
8 | set pagesize 1000
9 | set trims on
10 | spool 06_ins
11 |
12 | --
13 | -- This will ensure that we are not using AutoDOP
14 | -- Auto DOP is not a "problem", but using manual
15 | -- DOP will mean that the script will work
16 | -- as intended in this test case.
17 | --
18 | alter session set parallel_degree_policy = 'MANUAL';
19 | --
20 | -- Enable parallel DML so that the write into the
21 | -- staging table will be in parallel for maximum
22 | -- performance.
23 | --
24 | alter session enable parallel dml;
25 |
26 | --
27 | -- Read the data files via the external table
28 | -- and insert the rows into the staging table.
29 | --
30 | INSERT /*+ APPEND PARALLEL(itab,2) */ INTO sales_stage itab
31 | SELECT /*+ PARALLEL(tab,2) */ *
32 | FROM sales_ext tab
33 | /
34 |
35 | SELECT *
36 | FROM table(dbms_xplan.display_cursor);
37 |
38 | --
39 | -- Expect to see an error here!
40 | -- This is because you will need to
41 | -- commit the data before it is read.
42 | --
43 | SELECT count(*)
44 | FROM sales_stage
45 | /
46 |
47 | --
48 | -- Commit the transaction
49 | --
50 | COMMIT;
51 |
52 | --
53 | -- Now you can read the loaded data
54 | --
55 | SELECT count(*)
56 | FROM sales_stage
57 | /
58 |
59 | spool off
60 |
--------------------------------------------------------------------------------
/bulk_load/07_insz.lst:
--------------------------------------------------------------------------------
1 | SQL>
2 | SQL> --
3 | SQL> -- This will ensure that we are not using AutoDOP
4 | SQL> -- Auto DOP is not a "problem", but using manual
5 | SQL> -- DOP will mean that the script will work
6 | SQL> -- as intended in this test case.
7 | SQL> --
8 | SQL> alter session set parallel_degree_policy = 'MANUAL';
9 |
10 | Session altered.
11 |
12 | Elapsed: 00:00:00.00
13 | SQL> --
14 | SQL> -- Enable parallel DML so that the write into the
15 | SQL> -- staging table will be in parallel for maximum
16 | SQL> -- performance.
17 | SQL> --
18 | SQL> alter session enable parallel dml;
19 |
20 | Session altered.
21 |
22 | Elapsed: 00:00:00.00
23 | SQL>
24 | SQL> --
25 | SQL> -- Read the data files via the gzip external table
26 | SQL> -- and insert the rows into the staging table.
27 | SQL> -- We can read the gzip files at DOP 2 because there are
28 | SQL> -- two files - we can't read just part of a gzip file so if we
29 | SQL> -- want to read at DOP 4, then we will need 4 gzip files.
30 | SQL> --
31 | SQL> INSERT /*+ APPEND PARALLEL(itab,2) */ INTO sales_stage itab
32 | 2 SELECT /*+ PARALLEL(tab,2) */ *
33 | 3 FROM salesz_ext tab
34 | 4 /
35 |
36 | 2000 rows created.
37 |
38 | Elapsed: 00:00:00.07
39 | SQL>
40 | SQL> SELECT *
41 | 2 FROM table(dbms_xplan.display_cursor);
42 |
43 | PLAN_TABLE_OUTPUT
44 | ------------------------------------------------------------------------------------------------------------------------------------------------------
45 | SQL_ID 4z4mu1yt280b3, child number 0
46 | -------------------------------------
47 | INSERT /*+ APPEND PARALLEL(itab,2) */ INTO sales_stage itab SELECT /*+
48 | PARALLEL(tab,2) */ * FROM salesz_ext tab
49 |
50 | Plan hash value: 2329907319
51 |
52 | ------------------------------------------------------------------------------------------------------------------------------
53 | | Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time | TQ |IN-OUT| PQ Distrib |
54 | ------------------------------------------------------------------------------------------------------------------------------
55 | | 0 | INSERT STATEMENT | | | | 393 (100)| | | | |
56 | | 1 | PX COORDINATOR | | | | | | | | |
57 | | 2 | PX SEND QC (RANDOM) | :TQ10000 | 204K| 22M| 393 (5)| 00:00:01 | Q1,00 | P->S | QC (RAND) |
58 | | 3 | LOAD AS SELECT (HYBRID TSM/HWMB)| | | | | | Q1,00 | PCWP | |
59 | | 4 | PX BLOCK ITERATOR | | 204K| 22M| 393 (5)| 00:00:01 | Q1,00 | PCWC | |
60 | | 5 | EXTERNAL TABLE ACCESS FULL | SALESZ_EXT | 204K| 22M| 393 (5)| 00:00:01 | Q1,00 | PCWP | |
61 | ------------------------------------------------------------------------------------------------------------------------------
62 |
63 | Note
64 | -----
65 | - SQL plan baseline SQL_PLAN_2g0y79q5bypbf8adf9077 used for this statement
66 |
67 |
68 | 22 rows selected.
69 |
70 | Elapsed: 00:00:00.02
71 | SQL>
72 | SQL> --
73 | SQL> -- Expect to see an error here!
74 | SQL> -- This is because you will need to
75 | SQL> -- commit the data before it is read.
76 | SQL> --
77 | SQL> SELECT count(*)
78 | 2 FROM sales_stage
79 | 3 /
80 | SELECT count(*)
81 | *
82 | ERROR at line 1:
83 | ORA-12838: cannot read/modify an object after modifying it in parallel
84 |
85 |
86 | Elapsed: 00:00:00.00
87 | SQL>
88 | SQL> --
89 | SQL> -- Commit the transaction
90 | SQL> --
91 | SQL> COMMIT;
92 |
93 | Commit complete.
94 |
95 | Elapsed: 00:00:00.01
96 | SQL>
97 | SQL> --
98 | SQL> -- Now you can read the loaded data
99 | SQL> --
100 | SQL> SELECT count(*)
101 | 2 FROM sales_stage
102 | 3 /
103 |
104 | COUNT(*)
105 | ----------
106 | 4000
107 |
108 | Elapsed: 00:00:00.00
109 | SQL>
110 | SQL> spool off
111 |
--------------------------------------------------------------------------------
/bulk_load/07_insz.sql:
--------------------------------------------------------------------------------
1 | set timing on
2 | set echo on
3 | set linesize 300
4 | column table_name format a30
5 | column segment_name format a30
6 | column plan_table_output format a150
7 | column partition_name format a30
8 | set pagesize 1000
9 | set trims on
10 | spool 07_insz
11 |
12 | --
13 | -- This will ensure that we are not using AutoDOP
14 | -- Auto DOP is not a "problem", but using manual
15 | -- DOP will mean that the script will work
16 | -- as intended in this test case.
17 | --
18 | alter session set parallel_degree_policy = 'MANUAL';
19 | --
20 | -- Enable parallel DML so that the write into the
21 | -- staging table will be in parallel for maximum
22 | -- performance.
23 | --
24 | alter session enable parallel dml;
25 |
26 | --
27 | -- Read the data files via the gzip external table
28 | -- and insert the rows into the staging table.
29 | -- We can read the gzip files at DOP 2 because there are
30 | -- two files - we can't read just part of a gzip file so if we
31 | -- want to read at DOP 4, then we will need 4 gzip files.
32 | --
33 | INSERT /*+ APPEND PARALLEL(itab,2) */ INTO sales_stage itab
34 | SELECT /*+ PARALLEL(tab,2) */ *
35 | FROM salesz_ext tab
36 | /
37 |
38 | SELECT *
39 | FROM table(dbms_xplan.display_cursor);
40 |
41 | --
42 | -- Expect to see an error here!
43 | -- This is because you will need to
44 | -- commit the data before it is read.
45 | --
46 | SELECT count(*)
47 | FROM sales_stage
48 | /
49 |
50 | --
51 | -- Commit the transaction
52 | --
53 | COMMIT;
54 |
55 | --
56 | -- Now you can read the loaded data
57 | --
58 | SELECT count(*)
59 | FROM sales_stage
60 | /
61 |
62 | spool off
63 |
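A note on scaling this load up: because a gzip file cannot be split, the read DOP is capped by the number of gzip files. A minimal sketch of moving to DOP 4, assuming four gzip files have been produced (the file names below are hypothetical and must exist in the Oracle directory used by the external table):

    ALTER TABLE salesz_ext
      LOCATION ('sales_1.dat.gz','sales_2.dat.gz','sales_3.dat.gz','sales_4.dat.gz');

    INSERT /*+ APPEND PARALLEL(itab,4) */ INTO sales_stage itab
    SELECT /*+ PARALLEL(tab,4) */ *
    FROM salesz_ext tab
    /

    COMMIT;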
--------------------------------------------------------------------------------
/bulk_load/README.md:
--------------------------------------------------------------------------------
1 | Bulk Loading via External Table Examples
2 |
3 | A couple of examples of bulk loading data into an Oracle database using external tables.
4 |
5 | For full details, and the background behind the examples, check out the LINK YouTube web seminar. It explains how to load data in bulk into an Oracle Database.
6 |
7 | The examples were tested on Oracle Enterprise Linux and Oracle Database 12.1.0.2, but they should work on Oracle 11gR2 too.
8 |
9 | It's easiest to use the scripts in an Oracle DBA user account, but this is not necessary as long as you have privileges to create tables and Oracle DB directories. The scripts assume that you have created an empty "/home/oracle/direct" directory, but you can change this by editing "01_dirs.sql" and "05_makedat.sh" to use your chosen location. Note that 05_makedat.sh will delete some files from this directory, so make sure that it does not contain anything of value.
10 |
11 | The scripts are intended to be executed in order, "01...", "02..." and so on. Example ".lst" output files are given so you can see what the "ins" scripts do when you run them.
12 |
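For reference, a typical end-to-end run looks like this (05_makedat.sh is executed from an OS shell rather than SQL*Plus; the "host" command below is one way to do that from within a SQL*Plus session):

    SQL> @01_dirs
    SQL> @02_table
    SQL> @03_etable
    SQL> @04_etablez
    SQL> host ./05_makedat.sh
    SQL> @06_ins
    SQL> @07_insz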
13 | DISCLAIMER:
14 | -- These scripts are provided for educational purposes only.
15 | -- They are NOT supported by Oracle World Wide Technical Support.
16 | -- The scripts have been tested and they appear to work as intended.
17 | -- You should always run scripts on a test instance.
18 | -- Note that they will DROP tables when they are executed.
19 |
20 |
--------------------------------------------------------------------------------
/compare_ofe/README.md:
--------------------------------------------------------------------------------
1 | Get a detailed list of database parameters and fix controls affected by optimizer_features_enable (OFE)
2 |
3 | The script should be run on a test instance and it requires access to SYS.X$... tables and the ability to "alter session set optimizer_features_enable".
4 |
5 | If the database has any Optimizer database parameters explicitly set, then this can mask reported differences.
6 |
7 | The script will create some tables and drop them on completion, so you will be prompted for the name of a schema where it is safe to do this.
8 |
9 | Usage:
10 | 1) Use SQL*Plus to connect to an account with access to SYS.X$ tables (usually SYS)
11 | 2) @ofe
12 |
13 | See the Optimizer blog for more details.
14 |
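For example, a complete session on a test instance might look like this (assuming OS authentication to a SYSDBA-capable account):

    $ sqlplus / as sysdba
    SQL> @ofe

You will then be prompted for a scratch schema name and for the low and high optimizer_features_enable versions to compare.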
--------------------------------------------------------------------------------
/compare_ofe/ofe.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM
3 | REM Compare database settings between different optimizer feature enable (OFE) settings
4 | REM
5 | REM
6 | WHENEVER SQLERROR EXIT FAILURE
7 |
8 | set trims on
9 | set feedback off
10 | set linesize 200
11 | set pagesize 1000
12 | set verify off
13 | column version_from format a10
14 | column sql_feature format a36
15 | column parameter_value format a40
16 | column parameter_desc format a80
17 | column parameter_name format a40
18 | column version format a20
19 | column available_versions format a20
20 |
21 | PROMPT
22 | PROMPT *****
23 | PROMPT ***** WARNING:
24 | PROMPT ***** If you have explicitly set any Optimizer parameters
25 | PROMPT ***** then be aware that this can mask differences between
26 | PROMPT ***** different optimizer_features_enable settings.
27 | PROMPT ***** This can make the results of the comparison (below) incomplete.
28 | PROMPT *****
29 | PROMPT
30 | PROMPT Press RETURN to continue...
31 | PAUSE
32 |
33 | accept schname prompt 'Enter the name of a schema where it is safe to create tables: '
34 |
35 | declare
36 | ORA_00942 exception; pragma Exception_Init(ORA_00942, -00942);
37 | begin
38 | execute immediate 'drop table &schname..hiver_fix ';
39 | exception when ORA_00942 then null;
40 | end;
41 | /
42 |
43 | declare
44 | ORA_00942 exception; pragma Exception_Init(ORA_00942, -00942);
45 | begin
46 | execute immediate 'drop table &schname..lover_fix ';
47 | exception when ORA_00942 then null;
48 | end;
49 | /
50 |
51 | declare
52 | ORA_00942 exception; pragma Exception_Init(ORA_00942, -00942);
53 | begin
54 | execute immediate 'drop table &schname..hiver_env ';
55 | exception when ORA_00942 then null;
56 | end;
57 | /
58 |
59 | declare
60 | ORA_00942 exception; pragma Exception_Init(ORA_00942, -00942);
61 | begin
62 | execute immediate 'drop table &schname..lover_env ';
63 | exception when ORA_00942 then null;
64 | end;
65 | /
66 |
67 | select distinct regexp_replace(regexp_replace(optimizer_feature_enable,'^8',' 8'),'^9',' 9') available_versions
68 | from v$session_fix_control
69 | order by 1;
70 |
71 | define lover="11.2.0.4"
72 | define hiver="12.1.0.2"
73 |
74 | accept lover default &lover. prompt 'Enter low version [default: &lover.]: '
75 | accept hiver default &hiver. prompt 'Enter high version [default: &hiver.]: '
76 |
77 | alter session set optimizer_features_enable = '&hiver';
78 |
79 | create table &schname..hiver_fix as
80 | select bugno,
81 | regexp_replace(regexp_replace(optimizer_feature_enable,'^8',' 8'),'^9',' 9') version_from,
82 | value,
83 | sql_feature,
84 | description
85 | from v$session_fix_control
86 | where session_id = userenv('sid');
87 |
88 | create table &schname..hiver_env as
89 | SELECT pi.ksppinm parameter_name, pcv.ksppstvl parameter_value, pcv.ksppstdf isdefault, pi.ksppdesc parameter_desc
90 | FROM sys.x$ksppi pi,
91 | sys.x$ksppcv pcv
92 | WHERE pi.indx = pcv.indx ;
93 |
94 | alter session set optimizer_features_enable = '&lover';
95 |
96 | create table &schname..lover_fix as
97 | select bugno,
98 | regexp_replace(regexp_replace(optimizer_feature_enable,'^8',' 8'),'^9',' 9') version_from,
99 | value,
100 | sql_feature,
101 | description
102 | from v$session_fix_control
103 | where session_id = userenv('sid');
104 |
105 | create table &schname..lover_env as
106 | SELECT pi.ksppinm parameter_name, pcv.ksppstvl parameter_value, pcv.ksppstdf isdefault, pi.ksppdesc parameter_desc
107 | FROM sys.x$ksppi pi,
108 | sys.x$ksppcv pcv
109 | WHERE pi.indx = pcv.indx ;
110 |
111 | PROMPT
112 | PROMPT ***
113 | PROMPT *** List of OFE-related fix controls added after &lover. - up until &hiver. inclusive:
114 | PROMPT ***
115 |
116 | select *
117 | from &schname..hiver_fix
118 | minus
119 | select *
120 | from &schname..lover_fix
121 | order by 2,1;
122 |
123 | prompt Press RETURN to continue...
124 | pause
125 |
126 | PROMPT ***
127 | PROMPT *** List of additional or changed OFE-related parameters in &hiver. compared to &lover.:
128 | PROMPT ***
129 |
130 | select *
131 | from &schname..hiver_env
132 | minus
133 | select *
134 | from &schname..lover_env
135 | order by 1;
136 |
137 | drop table &schname..hiver_fix ;
138 | drop table &schname..lover_fix ;
139 |
140 | drop table &schname..hiver_env ;
141 | drop table &schname..lover_env ;
142 |
143 | disconnect
144 |
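Once the comparison has identified a fix control of interest, it can be tested in isolation at session level. A sketch, using a placeholder bug number rather than one taken from real output:

    -- Check the current value (substitute a bug number reported above)
    select bugno, value, description
    from v$session_fix_control
    where bugno = 1234567;

    -- Disable the fix for this session only
    alter session set "_fix_control" = '1234567:0';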
--------------------------------------------------------------------------------
/db_load/README.md:
--------------------------------------------------------------------------------
1 | Data Loading Examples
2 |
3 | Some examples of loading data into an Oracle database. Note that the Java code includes examples of both good and (very!) bad practice. For full details, and the background behind the examples, check out the YouTube web seminar. It explains how you can use good practice to ensure a smooth path forward if your solution needs to scale out and accommodate large data volumes and high load rates.
4 |
5 | The Java code was tested on JDK 7 and Oracle Database 12.1.0.2, but it will work without issue on Oracle 11gR2 too. To compile it, your project will need to include the Oracle JDBC client libraries.
6 |
7 | The Python example requires cx_Oracle, which in turn depends on an Oracle client installation (for example, the basic instant client plus the SDK).
8 |
9 | The example output file is included so that you can see how the Java code behaves. Note that it shows what happens if you use a transatlantic SQL*Net connection: any optimization that reduces the number of network round trips results in a very significant performance improvement, more so than any other change! If you are using a local database, you will want to increase *rowCount* significantly.
10 |
11 | A SQL script is included to create the test table. Your Oracle user will need to have database resource/create table permissions to create it. Also, make sure that you can "select \* from" v$mystat and v$statname using your Oracle user account (a DBA account holder can grant your user access to these data dictionary views if necessary).
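For example, a DBA could issue the following grants (shown here for the ADHOC account used in insert_example.py; note that the grants are made on the underlying V_$ views rather than the V$ synonyms):

    grant select on v_$mystat to adhoc;
    grant select on v_$statname to adhoc;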
--------------------------------------------------------------------------------
/db_load/insert_example.py:
--------------------------------------------------------------------------------
1 | #-- DISCLAIMER:
2 | #-- This script is provided for educational purposes only. It is
3 | #-- NOT supported by Oracle World Wide Technical Support.
4 | #-- The script has been tested and appears to work as intended.
5 | #-- You should always run new scripts initially
6 | #-- on a test instance.
7 |
8 | import cx_Oracle
9 |
10 | dsn_tns = cx_Oracle.makedsn('myusserver', 7796, 'npbr')
11 |
12 | db = cx_Oracle.connect('adhoc', 'adhoc', dsn_tns)
13 |
14 | db.autocommit = False
15 |
16 | print(db.version)
17 |
18 | stats = db.cursor()
19 | stats.prepare("select name,value from v$mystat m, v$statname n where m.statistic# = n.statistic# "
20 | + "and name in ('parse count (hard)','parse count (total)','user commits','execute count',"
21 | + "'bytes received via SQL*Net from client','bytes sent via SQL*Net to client',"
22 | + "'bytes via SQL*Net vector from client','bytes via SQL*Net vector to client',"
23 | + "'SQL*Net roundtrips to/from client') order by name")
24 | stats.execute(None)
25 | print(stats.fetchall())
26 |
27 | #cursor = db.cursor()
28 | #cursor.prepare('insert into ins values (:p1,:p2,:p3)')
29 | #named_params = {'p1':50, 'p2':'A', 'p3':'B'}
30 | #cursor.execute(None,named_params)
31 |
32 | db.commit()
33 |
34 | cursorm = db.cursor()
35 | cursorm.prepare('insert into ins values (:p1,:p2,:p3)')
36 | ROWS = []
37 | for y in range(0,10):
38 | ROWS.append((50,'X','Y'))
39 |
40 | cursorm.executemany(None,ROWS)
41 |
42 | db.commit()
43 |
44 | stats.execute(None)
45 | print(stats.fetchall())
46 |
47 | db.close()
48 |
--------------------------------------------------------------------------------
/db_load/table.sql:
--------------------------------------------------------------------------------
1 | create table ins(
2 | id number(10)
3 | ,t1 varchar2(100)
4 | ,t2 varchar2(100));
5 |
6 |
--------------------------------------------------------------------------------
/direct_path/11g/README.md:
--------------------------------------------------------------------------------
1 | Direct Path Load Examples for Oracle Database 11g
2 |
3 | Unlike 12.1.0.2, the LOAD AS SELECT plan operation is not decorated with the load strategy in 11g.
4 |
5 | The SQL scripts demonstrate how this information can be retrieved from a 10053 trace. The "show_type.sh" script shows how you can grep this information from the ".trc" trace files.
6 |
7 | Compare "tsm_v_tsmhwmb.lst" here with the 12c example to see that fewer new extents are created in 12c due to the hybrid TSM/HWMB strategy.
8 |
--------------------------------------------------------------------------------
/direct_path/11g/equi.lst:
--------------------------------------------------------------------------------
1 | SQL>
2 | SQL> DROP TABLE sales_dl;
3 |
4 | Table dropped.
5 |
6 | SQL>
7 | SQL> CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
8 |
9 | Table created.
10 |
11 | SQL>
12 | SQL> DECLARE
13 | 2 i NUMBER(10);
14 | 3 BEGIN
15 | 4 FOR i IN 1..10
16 | 5 LOOP
17 | 6 INSERT INTO sales_dl
18 | 7 SELECT ROWNUM, MOD(ROWNUM,1000)
19 | 8 FROM dual
20 | 9 CONNECT BY LEVEL <= 100000;
21 | 10 COMMIT;
22 | 11 END LOOP;
23 | 12 END;
24 | 13 /
25 |
26 | PL/SQL procedure successfully completed.
27 |
28 | SQL>
29 | SQL> EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
30 |
31 | PL/SQL procedure successfully completed.
32 |
33 | SQL>
34 | SQL> alter table sales_dl parallel 4;
35 |
36 | Table altered.
37 |
38 | SQL>
39 | SQL> alter session set parallel_force_local = FALSE;
40 |
41 | Session altered.
42 |
43 | SQL> alter session set parallel_degree_policy = 'MANUAL';
44 |
45 | Session altered.
46 |
47 | SQL> alter session enable parallel dml;
48 |
49 | Session altered.
50 |
51 | SQL> alter session enable parallel ddl;
52 |
53 | Session altered.
54 |
55 | SQL>
56 | SQL> drop table sales_p1;
57 |
58 | Table dropped.
59 |
60 | SQL> drop table sales_p2;
61 |
62 | Table dropped.
63 |
64 | SQL>
65 | SQL> --
66 | SQL> -- TSM PCTAS
67 | SQL> --
68 | SQL> create table sales_p1
69 | 2 partition by hash (sale_id)
70 | 3 partitions 64
71 | 4 parallel 4
72 | 5 as select * from sales_dl
73 | 6 /
74 |
75 | Table created.
76 |
77 | SQL>
78 | SQL> select * from table(dbms_xplan.display_cursor);
79 |
80 | PLAN_TABLE_OUTPUT
81 | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
82 | SQL_ID 2bptnfjagyuzy, child number 0
83 | -------------------------------------
84 | create table sales_p
85 |
86 | Plan hash value: 1354270924
87 |
88 | ---------------------------------------------------------------------------------------------------------------------
89 | | Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time | TQ |IN-OUT| PQ Distrib |
90 | ---------------------------------------------------------------------------------------------------------------------
91 | | 0 | CREATE TABLE STATEMENT | | | | 5494 (100)| | | | |
92 | | 1 | PX COORDINATOR | | | | | | | | |
93 | | 2 | PX SEND QC (RANDOM) | :TQ10001 | 1000K| 8789K| 153 (2)| 00:00:01 | Q1,01 | P->S | QC (RAND) |
94 | | 3 | LOAD AS SELECT | | | | | | Q1,01 | PCWP | |
95 | | 4 | PX RECEIVE | | 1000K| 8789K| 153 (2)| 00:00:01 | Q1,01 | PCWP | |
96 | | 5 | PX SEND PARTITION (KEY)| :TQ10000 | 1000K| 8789K| 153 (2)| 00:00:01 | Q1,00 | P->P | PART (KEY) |
97 | | 6 | PX BLOCK ITERATOR | | 1000K| 8789K| 153 (2)| 00:00:01 | Q1,00 | PCWC | |
98 | |* 7 | TABLE ACCESS FULL | SALES_DL | 1000K| 8789K| 153 (2)| 00:00:01 | Q1,00 | PCWP | |
99 | ---------------------------------------------------------------------------------------------------------------------
100 |
101 | Predicate Information (identified by operation id):
102 | ---------------------------------------------------
103 |
104 | 7 - access(:Z>=:Z AND :Z<=:Z)
105 |
106 |
107 | 24 rows selected.
108 |
109 | SQL>
110 | SQL> create table sales_p2
111 | 2 partition by hash (sale_id)
112 | 3 partitions 64
113 | 4 parallel 4
114 | 5 as select * from sales_dl where 1=-1
115 | 6 /
116 |
117 | Table created.
118 |
119 | SQL>
120 | SQL> --
121 | SQL> -- An equi-partition PIDL
122 | SQL> --
123 | SQL> alter session set tracefile_identifier = 'EQUI';
124 |
125 | Session altered.
126 |
127 | SQL> ALTER SESSION SET EVENTS='10053 trace name context forever, level 1';
128 |
129 | Session altered.
130 |
131 | SQL>
132 | SQL> insert /*+ append */
133 | 2 into sales_p2 t1
134 | 3 select * from sales_p t2;
135 |
136 | 1039996 rows created.
137 |
138 | SQL>
139 | SQL> ALTER SESSION SET EVENTS '10053 trace name context off';
140 |
141 | Session altered.
142 |
143 | SQL>
144 | SQL> commit;
145 |
146 | Commit complete.
147 |
148 | SQL>
149 | SQL> exit
150 |
--------------------------------------------------------------------------------
/direct_path/11g/equi.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool equi
8 |
9 | DROP TABLE sales_dl;
10 |
11 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
12 |
13 | DECLARE
14 | i NUMBER(10);
15 | BEGIN
16 | FOR i IN 1..10
17 | LOOP
18 | INSERT INTO sales_dl
19 | SELECT ROWNUM, MOD(ROWNUM,1000)
20 | FROM dual
21 | CONNECT BY LEVEL <= 100000;
22 | COMMIT;
23 | END LOOP;
24 | END;
25 | /
26 |
27 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
28 |
29 | alter table sales_dl parallel 4;
30 |
31 | alter session set parallel_force_local = FALSE;
32 | alter session set parallel_degree_policy = 'MANUAL';
33 | alter session enable parallel dml;
34 | alter session enable parallel ddl;
35 |
36 | drop table sales_p1;
37 | drop table sales_p2;
38 |
39 | --
40 | -- TSM PCTAS
41 | --
42 | create table sales_p1
43 | partition by hash (sale_id)
44 | partitions 64
45 | parallel 4
46 | as select * from sales_dl
47 | /
48 |
49 | select * from table(dbms_xplan.display_cursor);
50 |
51 | create table sales_p2
52 | partition by hash (sale_id)
53 | partitions 64
54 | parallel 4
55 | as select * from sales_dl where 1=-1
56 | /
57 |
58 | --
59 | -- An equi-partition PIDL
60 | --
61 | alter session set tracefile_identifier = 'EQUI';
62 | ALTER SESSION SET EVENTS='10053 trace name context forever, level 1';
63 |
64 | insert /*+ append */
65 | into sales_p2 t1
66 | select * from sales_p1 t2;
67 |
68 | ALTER SESSION SET EVENTS '10053 trace name context off';
69 |
70 | commit;
71 |
72 |
--------------------------------------------------------------------------------
/direct_path/11g/merge.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool merge
8 |
9 | alter session enable parallel dml;
10 | alter session set parallel_force_local = FALSE;
11 | alter session set parallel_degree_policy = 'MANUAL';
12 |
13 | DROP TABLE sales_dl;
14 | DROP TABLE sales_dl_copy;
15 |
16 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10), counter NUMBER(10));
17 |
18 | BEGIN
19 | INSERT INTO sales_dl
20 | SELECT ROWNUM, MOD(ROWNUM,1000),ROWNUM*2
21 | FROM dual
22 | CONNECT BY LEVEL <= 1000000;
23 | COMMIT;
24 | END;
25 | /
26 |
27 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
28 |
29 | alter session enable parallel dml;
30 | alter session set parallel_force_local = FALSE;
31 | alter session set parallel_degree_policy = 'MANUAL';
32 |
33 | CREATE TABLE sales_dl_copy
34 | AS
35 | SELECT * FROM sales_dl;
36 |
37 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL_COPY');
38 |
39 | alter table sales_dl parallel 4;
40 | alter table sales_dl_copy parallel 4;
41 |
42 | select segment_type,extent_id,bytes,blocks
43 | from user_extents
44 | where segment_name ='SALES_DL_COPY'
45 | order by extent_id;
46 |
47 | MERGE INTO sales_dl_copy sdlc USING (
48 | SELECT sale_id, customer_id
49 | FROM sales_dl WHERE sale_id < 10000
50 | ) sdl
51 | ON (sdlc.sale_id = sdl.sale_id - 5000)
52 | WHEN MATCHED THEN
53 | UPDATE SET sdlc.counter = - sdlc.counter
54 | WHEN NOT MATCHED THEN
55 | INSERT /*+ APPEND */ (sale_id,customer_id)
56 | VALUES (sdl.sale_id-5000,sdl.customer_id)
57 | ;
58 |
59 | select * from table(dbms_xplan.display_cursor);
60 |
61 | commit;
62 |
63 | --
64 | -- Here are the extents for SALES_DL_COPY after
65 | -- the MERGE operation
66 | -- Compare the 11g and 12c case.
67 | --
68 | -- Your exact results will depend on:
69 | -- DB block size (8K in my case)
70 | -- Tablespace storage defaults (I am using default USERS tablespace)
71 | --
72 | -- The number of extents in the 12c case will
73 | -- also depend on the number of active RAC instances.
74 | -- In my case it is two.
75 | --
76 | -- Expect fewer extents in the 12c case than the 11g case.
77 | --
78 |
79 | select segment_type,extent_id,bytes,blocks
80 | from user_extents
81 | where segment_name ='SALES_DL_COPY'
82 | order by extent_id;
83 |
84 |
--------------------------------------------------------------------------------
/direct_path/11g/show_type.sh:
--------------------------------------------------------------------------------
1 | grep "load_type" *.trc
2 |
--------------------------------------------------------------------------------
/direct_path/11g/tsm.lst:
--------------------------------------------------------------------------------
1 | SQL>
2 | SQL> DROP TABLE sales_dl;
3 |
4 | Table dropped.
5 |
6 | SQL> DROP TABLE sales_copy;
7 |
8 | Table dropped.
9 |
10 | SQL>
11 | SQL> CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
12 |
13 | Table created.
14 |
15 | SQL>
16 | SQL> DECLARE
17 | 2 i NUMBER(10);
18 | 3 BEGIN
19 | 4 FOR i IN 1..10
20 | 5 LOOP
21 | 6 INSERT INTO sales_dl
22 | 7 SELECT ROWNUM, MOD(ROWNUM,1000)
23 | 8 FROM dual
24 | 9 CONNECT BY LEVEL <= 100000;
25 | 10 COMMIT;
26 | 11 END LOOP;
27 | 12 END;
28 | 13 /
29 |
30 | PL/SQL procedure successfully completed.
31 |
32 | SQL>
33 | SQL> EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
34 |
35 | PL/SQL procedure successfully completed.
36 |
37 | SQL>
38 | SQL> alter session enable parallel dml;
39 |
40 | Session altered.
41 |
42 | SQL> alter session set parallel_force_local = FALSE;
43 |
44 | Session altered.
45 |
46 | SQL> alter session set parallel_degree_policy = 'MANUAL';
47 |
48 | Session altered.
49 |
50 | SQL>
51 | SQL> create table sales_copy as select * from sales_dl where 1=-1;
52 |
53 | Table created.
54 |
55 | SQL>
56 | SQL> alter session set tracefile_identifier = 'TSM';
57 |
58 | Session altered.
59 |
60 | SQL> ALTER SESSION SET EVENTS='10053 trace name context forever, level 1';
61 |
62 | Session altered.
63 |
64 | SQL>
65 | SQL> insert /*+ APPEND PARALLEL(t1,8) */ into sales_copy t1
66 | 2 select /*+ PARALLEL(t2,8) */ * from sales_dl t2;
67 |
68 | 1000000 rows created.
69 |
70 | SQL>
71 | SQL> ALTER SESSION SET EVENTS '10053 trace name context off';
72 |
73 | Session altered.
74 |
75 | SQL>
76 | SQL> commit;
77 |
78 | Commit complete.
79 |
80 | SQL> exit
81 |
--------------------------------------------------------------------------------
/direct_path/11g/tsm.sql:
--------------------------------------------------------------------------------
1 | set linesize 200
2 | column PLAN_TABLE_OUTPUT format a130
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool tsm
8 |
9 | DROP TABLE sales_dl;
10 | DROP TABLE sales_copy;
11 |
12 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
13 |
14 | DECLARE
15 | i NUMBER(10);
16 | BEGIN
17 | FOR i IN 1..10
18 | LOOP
19 | INSERT INTO sales_dl
20 | SELECT ROWNUM, MOD(ROWNUM,1000)
21 | FROM dual
22 | CONNECT BY LEVEL <= 100000;
23 | COMMIT;
24 | END LOOP;
25 | END;
26 | /
27 |
28 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
29 |
30 | alter session enable parallel dml;
31 | alter session set parallel_force_local = FALSE;
32 | alter session set parallel_degree_policy = 'MANUAL';
33 |
34 | create table sales_copy as select * from sales_dl where 1=-1;
35 |
36 | alter session set tracefile_identifier = 'TSM';
37 | ALTER SESSION SET EVENTS='10053 trace name context forever, level 1';
38 |
39 | insert /*+ APPEND PARALLEL(t1,8) */ into sales_copy t1
40 | select /*+ PARALLEL(t2,8) */ * from sales_dl t2;
41 |
42 | ALTER SESSION SET EVENTS '10053 trace name context off';
43 |
44 | commit;
45 |
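To locate the 10053 trace file that this script produces, you can ask the session where it is writing before you exit. A small helper query (11g and later; the file name will reflect the TSM tracefile_identifier set above):

    select value as trace_file
    from v$diag_info
    where name = 'Default Trace File';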
--------------------------------------------------------------------------------
/direct_path/11g/tsm_v_tsmhwmb.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool tsm_v_tsmhwmb
8 |
9 | alter session enable parallel dml;
10 | alter session set parallel_force_local = FALSE;
11 | alter session set parallel_degree_policy = 'MANUAL';
12 |
13 | DROP TABLE sales_dl;
14 | DROP TABLE sales_dl_copy;
15 |
16 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
17 |
18 | DECLARE
19 | i NUMBER(10);
20 | BEGIN
21 | FOR i IN 1..10
22 | LOOP
23 | INSERT INTO sales_dl
24 | SELECT ROWNUM, MOD(ROWNUM,1000)
25 | FROM dual
26 | CONNECT BY LEVEL <= 100000;
27 | COMMIT;
28 | END LOOP;
29 | END;
30 | /
31 |
32 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
33 |
34 | alter session enable parallel dml;
35 | alter session set parallel_force_local = FALSE;
36 | alter session set parallel_degree_policy = 'MANUAL';
37 |
38 | CREATE TABLE sales_dl_copy
39 | AS
40 | SELECT * FROM sales_dl WHERE 1=-1;
41 |
42 |
43 | INSERT /*+ APPEND PARALLEL(t1,8) */
44 | INTO sales_dl_copy t1
45 | SELECT /*+ PARALLEL(t2,8) */ *
46 | FROM sales_dl t2;
47 |
48 | select * from table(dbms_xplan.display_cursor);
49 |
50 | commit;
51 |
52 | --
53 | -- Here are the extents for the SALES_DL_COPY table.
54 | -- They should be similar in 11g and 12c.
55 | --
56 | select segment_type,extent_id,bytes,blocks
57 | from user_extents
58 | where segment_name ='SALES_DL_COPY'
59 | order by extent_id;
60 |
61 | --
62 | -- Perform four more PIDL operations
63 | --
64 | BEGIN
65 | FOR i IN 1..4
66 | LOOP
67 | INSERT /*+ APPEND PARALLEL(t1,8) */ INTO sales_dl_copy t1
68 | SELECT /*+ PARALLEL(t2,8) */ * FROM sales_dl t2 WHERE rownum<10000;
69 | COMMIT;
70 | END LOOP;
71 | END;
72 | /
73 |
74 | --
75 | -- Here are the extents for SALES_DL_COPY after
76 | -- five PIDL operations at DOP 8.
77 | -- Compare the 11g and 12c case.
78 | --
79 | -- Your exact results will depend on:
80 | -- DB block size (8K in my case)
81 | -- Tablespace storage defaults (I am using default USERS tablespace)
82 | --
83 | -- The number of extents in the 12c case will
84 | -- also depend on the number of active RAC instances.
85 | -- In my case it is two.
86 | --
87 | -- Expect fewer extents in the 12c case than the 11g case.
88 | --
89 | --
90 |
91 | select segment_type,extent_id,bytes,blocks
92 | from user_extents
93 | where segment_name ='SALES_DL_COPY'
94 | order by extent_id;
95 |
96 | spool off
97 |
--------------------------------------------------------------------------------
/direct_path/12c/README.md:
--------------------------------------------------------------------------------
1 | Direct Path Load Examples for Oracle Database 12c
2 |
3 | These scripts are intended for Oracle Database release 12.1.0.2 and above.
4 |
5 | The LST example files were created on a 2-node RAC cluster. The number of nodes in your cluster will affect the final number of extents listed in your tsm_v_tsmhwmb.lst file, so expect your results to differ. If you are using a large RAC cluster, it is possible to get a similar number of extents in the 12c example as in the 11g example at a lower DOP, but if you increase the DOP so that there is more than one PX server per database instance, then the 12c case will "beat" the 11g case.
6 |
7 | The SQL execution plans may not be displayed correctly on 12.1.0.1, and the space management plan decorations will not be shown either.
8 |
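As an illustration, on 12.1.0.2 the chosen strategy appears as a decoration on the LOAD AS SELECT operation in the execution plan, for example (taken from the bulk_load example in this repository):

    | 3 | LOAD AS SELECT (HYBRID TSM/HWMB) |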
--------------------------------------------------------------------------------
/direct_path/12c/equi.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool equi
8 |
9 | DROP TABLE sales_dl;
10 |
11 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
12 |
13 | DECLARE
14 | i NUMBER(10);
15 | BEGIN
16 | FOR i IN 1..10
17 | LOOP
18 | INSERT INTO sales_dl
19 | SELECT ROWNUM, MOD(ROWNUM,1000)
20 | FROM dual
21 | CONNECT BY LEVEL <= 100000;
22 | COMMIT;
23 | END LOOP;
24 | END;
25 | /
26 |
27 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
28 |
29 | alter table sales_dl parallel 4;
30 |
31 | alter session set parallel_force_local = FALSE;
32 | alter session set parallel_degree_policy = 'MANUAL';
33 | alter session enable parallel dml;
34 | alter session enable parallel ddl;
35 |
36 | drop table sales_p1;
37 | drop table sales_p2;
38 |
39 | --
40 | -- TSM PCTAS
41 | --
42 | create table sales_p1
43 | partition by hash (sale_id)
44 | partitions 64
45 | parallel 4
46 | as select * from sales_dl
47 | /
48 |
49 | select * from table(dbms_xplan.display_cursor);
50 |
51 | create table sales_p2
52 | partition by hash (sale_id)
53 | partitions 64
54 | parallel 4
55 | as select * from sales_dl where 1=-1
56 | /
57 |
58 | --
59 | -- An equi-partition PIDL
60 | --
61 | insert /*+ append */
62 | into sales_p2 t1
63 | select * from sales_p1 t2;
64 |
65 | select * from table(dbms_xplan.display_cursor);
66 |
67 | commit;
68 |
69 |
--------------------------------------------------------------------------------
/direct_path/12c/hwm.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool hwm
8 |
9 | DROP TABLE sales_dl;
10 |
11 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
12 |
13 | DECLARE
14 | i NUMBER(10);
15 | BEGIN
16 | FOR i IN 1..10
17 | LOOP
18 | INSERT INTO sales_dl
19 | SELECT ROWNUM, MOD(ROWNUM,1000)
20 | FROM dual
21 | CONNECT BY LEVEL <= 100000;
22 | COMMIT;
23 | END LOOP;
24 | END;
25 | /
26 |
27 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
28 |
29 | alter table sales_dl parallel 4;
30 |
31 | alter session set parallel_force_local = FALSE;
32 | alter session set parallel_degree_policy = 'MANUAL';
33 | alter session enable parallel dml;
34 | alter session enable parallel ddl;
35 |
36 | drop table sales_p1;
37 | drop table sales_p2;
38 |
39 | --
40 | -- TSM PCTAS
41 | --
42 | create table sales_p1
43 | partition by range (sale_id)
44 | subpartition by hash (customer_id)
45 | subpartitions 32
46 | (
47 | partition p0 values less than (50000),
48 | partition p1 values less than (200000))
49 | parallel 4
50 | as select * from sales_dl
51 | /
52 |
53 | select * from table(dbms_xplan.display_cursor);
54 |
55 | create table sales_p2
56 | partition by range (sale_id)
57 | subpartition by hash (customer_id)
58 | subpartitions 64
59 | (
60 | partition p0 values less than (50000),
61 | partition p1 values less than (200000))
62 | parallel 4
63 | as select * from sales_dl where 1=-1
64 | /
65 |
66 |
67 | --
68 | -- An HWM PIDL
69 | --
70 | insert /*+ append */
71 | into sales_p2 t1
72 | select * from sales_p1 t2;
73 |
74 | select * from table(dbms_xplan.display_cursor);
75 |
76 | commit;
77 |
78 |
--------------------------------------------------------------------------------
/direct_path/12c/hwmb.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool hwmb
8 |
9 | DROP TABLE sales_dl;
10 |
11 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
12 |
13 | DECLARE
14 | i NUMBER(10);
15 | BEGIN
16 | FOR i IN 1..10
17 | LOOP
18 | INSERT INTO sales_dl
19 | SELECT ROWNUM, MOD(ROWNUM,1000)
20 | FROM dual
21 | CONNECT BY LEVEL <= 100000;
22 | COMMIT;
23 | END LOOP;
24 | END;
25 | /
26 |
27 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
28 |
29 | alter table sales_dl parallel 4;
30 |
31 | alter session set parallel_force_local = FALSE;
32 | alter session set parallel_degree_policy = 'MANUAL';
33 | alter session enable parallel dml;
34 | alter session enable parallel ddl;
35 |
36 | drop table sales_p1;
37 | drop table sales_p2;
38 |
39 | --
40 | -- TSM/HWMB PCTAS
41 | --
42 | create table sales_p1 partition by hash (sale_id) partitions 4
43 | parallel 4
44 | as select * from sales_dl
45 | /
46 |
47 | select * from table(dbms_xplan.display_cursor);
48 |
49 | create table sales_p2 partition by hash (sale_id) partitions 4
50 | parallel 4
51 | as select * from sales_dl where 1=-1
52 | /
53 |
54 | --
55 | -- An HWMB PIDL
56 | --
57 | insert /*+ append */
58 | into sales_p2 t1
59 | select * from sales_p1 t2;
60 |
61 | select * from table(dbms_xplan.display_cursor);
62 |
63 | commit;
64 |
65 |
--------------------------------------------------------------------------------
/direct_path/12c/merge.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool merge
8 |
9 | alter session enable parallel dml;
10 | alter session set parallel_force_local = FALSE;
11 | alter session set parallel_degree_policy = 'MANUAL';
12 |
13 | DROP TABLE sales_dl;
14 | DROP TABLE sales_dl_copy;
15 |
16 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10), counter NUMBER(10));
17 |
18 | BEGIN
19 | INSERT INTO sales_dl
20 | SELECT ROWNUM, MOD(ROWNUM,1000),ROWNUM*2
21 | FROM dual
22 | CONNECT BY LEVEL <= 1000000;
23 | COMMIT;
24 | END;
25 | /
26 |
27 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
28 |
29 | alter session enable parallel dml;
30 | alter session set parallel_force_local = FALSE;
31 | alter session set parallel_degree_policy = 'MANUAL';
32 |
33 | CREATE TABLE sales_dl_copy
34 | AS
35 | SELECT * FROM sales_dl;
36 |
37 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL_COPY');
38 |
39 | alter table sales_dl parallel 4;
40 | alter table sales_dl_copy parallel 4;
41 |
42 | select segment_type,extent_id,bytes,blocks
43 | from user_extents
44 | where segment_name ='SALES_DL_COPY'
45 | order by extent_id;
46 |
47 | MERGE INTO sales_dl_copy sdlc USING (
48 | SELECT sale_id, customer_id
49 | FROM sales_dl WHERE sale_id < 10000
50 | ) sdl
51 | ON (sdlc.sale_id = sdl.sale_id - 5000)
52 | WHEN MATCHED THEN
53 | UPDATE SET sdlc.counter = - sdlc.counter
54 | WHEN NOT MATCHED THEN
55 | INSERT /*+ APPEND */ (sale_id,customer_id)
56 | VALUES (sdl.sale_id-5000,sdl.customer_id)
57 | ;
58 |
59 | select * from table(dbms_xplan.display_cursor);
60 |
61 | commit;
62 |
63 | --
64 | -- Here are the extents for SALES_DL_COPY after
65 | -- the MERGE operation
66 | -- Compare the 11g and 12c case.
67 | --
68 | -- Your exact results will depend on:
69 | -- DB block size (8K in my case)
70 | -- Tablespace storage defaults (I am using default USERS tablespace)
71 | --
72 | -- The number of extents in the 12c case will
73 | -- also depend on the number of active RAC instances.
74 | -- In my case it is two.
75 | --
76 | -- Expect fewer extents in the 12c case than the 11g case.
77 | --
78 |
79 | select segment_type,extent_id,bytes,blocks
80 | from user_extents
81 | where segment_name ='SALES_DL_COPY'
82 | order by extent_id;
83 |
84 |
--------------------------------------------------------------------------------
/direct_path/12c/tsm_v_tsmhwmb.sql:
--------------------------------------------------------------------------------
1 | set linesize 260
2 | column PLAN_TABLE_OUTPUT format a200
3 | set pagesize 200
4 | set trims on
5 | set tab off
6 | set echo on
7 | spool tsm_v_tsmhwmb
8 |
9 | alter session enable parallel dml;
10 | alter session set parallel_force_local = FALSE;
11 | alter session set parallel_degree_policy = 'MANUAL';
12 |
13 | DROP TABLE sales_dl;
14 | DROP TABLE sales_dl_copy;
15 |
16 | CREATE TABLE sales_dl (sale_id NUMBER(10), customer_id NUMBER(10));
17 |
18 | DECLARE
19 | i NUMBER(10);
20 | BEGIN
21 | FOR i IN 1..10
22 | LOOP
23 | INSERT INTO sales_dl
24 | SELECT ROWNUM, MOD(ROWNUM,1000)
25 | FROM dual
26 | CONNECT BY LEVEL <= 100000;
27 | COMMIT;
28 | END LOOP;
29 | END;
30 | /
31 |
32 | EXEC dbms_stats.gather_table_stats(ownname=>NULL, tabname=>'SALES_DL');
33 |
34 | alter session enable parallel dml;
35 | alter session set parallel_force_local = FALSE;
36 | alter session set parallel_degree_policy = 'MANUAL';
37 |
38 | CREATE TABLE sales_dl_copy
39 | AS
40 | SELECT * FROM sales_dl WHERE 1=-1;
41 |
42 |
43 | INSERT /*+ APPEND PARALLEL(t1,8) */
44 | INTO sales_dl_copy t1
45 | SELECT /*+ PARALLEL(t2,8) */ *
46 | FROM sales_dl t2;
47 |
48 | select * from table(dbms_xplan.display_cursor);
49 |
50 | commit;
51 |
52 | --
53 | -- Here are the extents for the SALES_DL_COPY table.
54 | -- They should be similar in 11g and 12c.
55 | --
56 | select segment_type,extent_id,bytes,blocks
57 | from user_extents
58 | where segment_name ='SALES_DL_COPY'
59 | order by extent_id;
60 |
61 | --
62 | -- Perform four more PIDL operations
63 | --
64 | BEGIN
65 | FOR i IN 1..4
66 | LOOP
67 | INSERT /*+ APPEND PARALLEL(t1,8) */ INTO sales_dl_copy t1
68 | SELECT /*+ PARALLEL(t2,8) */ * FROM sales_dl t2 WHERE rownum<10000;
69 | COMMIT;
70 | END LOOP;
71 | END;
72 | /
73 |
74 | --
75 | -- Here are the extents for SALES_DL_COPY after
76 | -- five PIDL operations at DOP 8.
77 | -- Compare the 11g and 12c case.
78 | --
79 | -- Your exact results will depend on:
80 | -- DB block size (8K in my case)
81 | -- Tablespace storage defaults (I am using default USERS tablespace)
82 | --
83 | -- The number of extents in the 12c case will
84 | -- also depend on the number of active RAC instances.
85 | -- In my case it is two.
86 | --
87 | -- Expect fewer extents in the 12c case than the 11g case.
88 | --
89 |
90 | select segment_type,extent_id,bytes,blocks
91 | from user_extents
92 | where segment_name ='SALES_DL_COPY'
93 | order by extent_id;
94 |
95 | spool off
96 |
97 |
--------------------------------------------------------------------------------
/direct_path/README.md:
--------------------------------------------------------------------------------
1 | Direct Path Load Examples
2 |
3 | SQL scripts to compare direct path load in Oracle Database 11g Release 2 with Oracle Database 12c (12.1.0.2 and above). They are primarily intended to demonstrate the new Hybrid TSM/HWMB load strategy in 12c, comparing it to the TSM strategy available in 11g. See the 11g and 12c "tsm_v_tsmhwmb.sql" scripts and their associated spool files "tsm_v_tsmhwmb.lst" to see the difference in behavior between these two database versions. In particular, compare the "tsm_v_tsmhwmb.lst" files to see the reduced number of table extents created in the 12c example compared with 11g.
4 |
5 | The 12c directory contains a comprehensive set of examples demonstrating how the SQL execution plan is decorated with the chosen load strategy.
6 |
7 | The 11g directory contains a couple of examples for comparative purposes.
8 |
9 | Each SQL script has an associated LST file so you can see an example of the output without having to run them.
10 |
11 | The scripts need to be run in a TEST Oracle database account with privileges to create tables. Ideally, use
12 | the default USERS tablespace, but note that the script outputs are sensitive to your tablespace and database storage characteristics, so your output may differ from the examples given.
13 |
14 | DISCLAIMER:
15 | -- These scripts are provided for educational purposes only.
16 | -- They are NOT supported by Oracle World Wide Technical Support.
17 | -- The scripts have been tested and they appear to work as intended.
18 | -- You should always run scripts on a test instance.
19 | -- Note that they will DROP tables when they are executed.
20 |
21 |
22 |
--------------------------------------------------------------------------------
/new_synopses/01_mixed.lst:
--------------------------------------------------------------------------------
1 | SQL> @01_mixed
2 | SQL> set timing on
3 | SQL> set linesize 1000
4 | SQL> set pagesize 100
5 | SQL> set trims on
6 | SQL> column partition_name format a40
7 | SQL> column ndv_alg format a30
8 | SQL> column inc_stale format a30
9 | SQL>
10 | SQL> drop table t1 purge;
11 |
12 | Table dropped.
13 |
14 | Elapsed: 00:00:00.15
15 | SQL>
16 | SQL> create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
17 | 2 partition by range (num1)
18 | 3 interval (1) (
19 | 4 partition p1 values less than (1)
20 | 5 ,partition p2 values less than (2));
21 |
22 | Table created.
23 |
24 | Elapsed: 00:00:00.05
25 | SQL>
26 | SQL> insert /*+ APPEND */ into t1
27 | 2 select rownum, mod(rownum,5), mod(rownum,1000),'X'||mod(rownum,10000)
28 | 3 from (select 1 from dual connect by level <=1000);
29 |
30 | 1000 rows created.
31 |
32 | Elapsed: 00:00:00.23
33 | SQL>
34 | SQL> commit;
35 |
36 | Commit complete.
37 |
38 | Elapsed: 00:00:00.03
39 | SQL>
40 | SQL> --
41 | SQL> -- Enable incremental statistics
42 | SQL> --
43 | SQL> exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
44 |
45 | PL/SQL procedure successfully completed.
46 |
47 | Elapsed: 00:00:00.02
48 | SQL> --
49 | SQL> --No need to set because this is the default
50 | SQL> --
51 | SQL> --exec dbms_stats.set_table_prefs(null,'t1', 'incremental_staleness', 'allow_mixed_format')
52 | SQL>
53 | SQL> --
54 | SQL> -- Create old-style synopses
55 | SQL> --
56 | SQL> exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
57 |
58 | PL/SQL procedure successfully completed.
59 |
60 | Elapsed: 00:00:00.00
61 | SQL> exec dbms_stats.gather_table_stats(null,'t1')
62 |
63 | PL/SQL procedure successfully completed.
64 |
65 | Elapsed: 00:00:00.54
66 | SQL>
67 | SQL> select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
68 |
69 | NDV_ALG
70 | ------------------------------
71 | ADAPTIVE SAMPLING
72 |
73 | Elapsed: 00:00:00.01
74 | SQL> select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
75 |
76 | INC_STALE
77 | ------------------------------
78 | ALLOW_MIXED_FORMAT
79 |
80 | Elapsed: 00:00:00.00
81 | SQL>
82 | SQL> @t1check
83 | SQL> select subobject_name partition_name,
84 | 2 decode(h.spare1,NULL,'ADAPTIVE SAMPLING','HYPERLOGLOG') synopsis_type
85 | 3 from dba_objects o,
86 | 4 sys.wri$_optstat_synopsis_head$ h
87 | 5 where o.object_type = 'TABLE PARTITION'
88 | 6 and o.object_name = 'T1'
89 | 7 and o.owner = USER
90 | 8 and h.group# = o.object_id*2
91 | 9 and intcol# = 1
92 | 10 order by partition_name;
93 |
94 | PARTITION_NAME SYNOPSIS_TYPE
95 | ---------------------------------------- -----------------
96 | P1 ADAPTIVE SAMPLING
97 | P2 ADAPTIVE SAMPLING
98 | SYS_P2875 ADAPTIVE SAMPLING
99 | SYS_P2876 ADAPTIVE SAMPLING
100 | SYS_P2877 ADAPTIVE SAMPLING
101 |
102 | Elapsed: 00:00:00.03
103 | SQL>
104 | SQL>
105 | SQL>
106 | SQL> --
107 | SQL> -- Make one existing partition stale and add a new partition
108 | SQL> --
109 | SQL> insert /*+ APPEND */ into t1
110 | 2 select rownum, mod(rownum,2)+4, mod(rownum,1000),'X'||mod(rownum,10000)
111 | 3 from (select 1 from dual connect by level <=1000);
112 |
113 | 1000 rows created.
114 |
115 | Elapsed: 00:00:00.01
116 | SQL>
117 | SQL> exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
118 |
119 | PL/SQL procedure successfully completed.
120 |
121 | Elapsed: 00:00:00.00
122 | SQL> select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
123 |
124 | NDV_ALG
125 | ------------------------------
126 | HYPERLOGLOG
127 |
128 | Elapsed: 00:00:00.00
129 | SQL> select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
130 |
131 | INC_STALE
132 | ------------------------------
133 | ALLOW_MIXED_FORMAT
134 |
135 | Elapsed: 00:00:00.01
136 | SQL> exec dbms_stats.gather_table_stats(null,'t1')
137 |
138 | PL/SQL procedure successfully completed.
139 |
140 | Elapsed: 00:00:00.25
141 | SQL>
142 | SQL> @t1check
143 | SQL> select subobject_name partition_name,
144 | 2 decode(h.spare1,NULL,'ADAPTIVE SAMPLING','HYPERLOGLOG') synopsis_type
145 | 3 from dba_objects o,
146 | 4 sys.wri$_optstat_synopsis_head$ h
147 | 5 where o.object_type = 'TABLE PARTITION'
148 | 6 and o.object_name = 'T1'
149 | 7 and o.owner = USER
150 | 8 and h.group# = o.object_id*2
151 | 9 and intcol# = 1
152 | 10 order by partition_name;
153 |
154 | PARTITION_NAME SYNOPSIS_TYPE
155 | ---------------------------------------- -----------------
156 | P1 ADAPTIVE SAMPLING
157 | P2 ADAPTIVE SAMPLING
158 | SYS_P2875 ADAPTIVE SAMPLING
159 | SYS_P2876 ADAPTIVE SAMPLING
160 | SYS_P2877 HYPERLOGLOG
161 | SYS_P2913 HYPERLOGLOG
162 |
163 | 6 rows selected.
164 |
165 | Elapsed: 00:00:00.00
166 | SQL>
167 | SQL>
168 | SQL> exit
169 |
--------------------------------------------------------------------------------
/new_synopses/01_mixed.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Create some test tables
3 | -- NOTE!
4 | -- Tables called T1 and EXCH will be dropped
5 | --
6 | set echo on
7 | set timing on
8 | set linesize 1000
9 | set pagesize 100
10 | set trims on
11 | column partition_name format a40
12 | column ndv_alg format a30
13 | column inc_stale format a30
14 |
15 | drop table t1 purge;
16 |
17 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
18 | partition by range (num1)
19 | interval (1) (
20 | partition p1 values less than (1)
21 | ,partition p2 values less than (2));
22 |
23 | insert /*+ APPEND */ into t1
24 | select rownum, mod(rownum,5), mod(rownum,1000),'X'||mod(rownum,10000)
25 | from (select 1 from dual connect by level <=1000);
26 |
27 | commit;
28 |
29 | --
30 | -- Enable incremental statistics
31 | --
32 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
33 | --
34 | --No need to set because this is the default
35 | --
36 | --exec dbms_stats.set_table_prefs(null,'t1', 'incremental_staleness', 'allow_mixed_format')
37 |
38 | --
39 | -- Create old-style synopses
40 | --
41 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
42 | exec dbms_stats.gather_table_stats(null,'t1')
43 |
44 | select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
45 | select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
46 |
47 | @t1check
48 |
49 | --
50 | -- Make one existing partition stale and add a new partition
51 | --
52 | insert /*+ APPEND */ into t1
53 | select rownum, mod(rownum,2)+4, mod(rownum,1000),'X'||mod(rownum,10000)
54 | from (select 1 from dual connect by level <=1000);
55 |
56 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
57 | select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
58 | select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
59 | exec dbms_stats.gather_table_stats(null,'t1')
60 |
61 | @t1check
62 |
--------------------------------------------------------------------------------
/new_synopses/02_default.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Create some test tables
3 | -- NOTE!
4 | -- Tables called T1 and EXCH will be dropped
5 | --
6 | set echo on
7 | set timing on
8 | set linesize 1000
9 | set pagesize 100
10 | set trims on
11 | column partition_name format a40
12 | column ndv_alg format a30
13 | column inc_stale format a30
14 |
15 | drop table t1 purge;
16 |
17 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
18 | partition by range (num1)
19 | interval (1) (
20 | partition p1 values less than (1)
21 | ,partition p2 values less than (2));
22 |
23 | insert /*+ APPEND */ into t1
24 | select rownum, mod(rownum,5), mod(rownum,1000),'X'||mod(rownum,10000)
25 | from (select 1 from dual connect by level <=1000);
26 |
27 | commit;
28 |
29 | --
30 | -- Enable incremental statistics
31 | --
32 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
33 | --
34 | --No need to set because this is the default
35 | --
36 | --exec dbms_stats.set_table_prefs(null,'t1', 'incremental_staleness', 'allow_mixed_format')
37 |
38 | --
39 | -- Create old-style synopses
40 | --
41 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
42 | exec dbms_stats.gather_table_stats(null,'t1')
43 |
44 | select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
45 | select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
46 |
47 | @t1check
48 |
49 | --
50 | -- Add a new partition and make one partition stale
51 | --
52 | insert /*+ APPEND */ into t1
53 | select rownum, mod(rownum,2)+4, mod(rownum,1000),'X'||mod(rownum,10000)
54 | from (select 1 from dual connect by level <=1000);
55 |
56 | @t1check
57 |
58 | --
59 | -- Setting back to default
60 | --
61 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'repeat or hyperloglog')
62 | select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
63 | select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
64 |
65 | exec dbms_stats.gather_table_stats(null,'t1')
66 |
67 | @t1check
68 |
--------------------------------------------------------------------------------
/new_synopses/03_hll.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Create some test tables
3 | -- NOTE!
4 | -- Tables called T1 and EXCH will be dropped
5 | --
6 | set echo on
7 | set timing on
8 | set linesize 1000
9 | set pagesize 100
10 | set trims on
11 | column partition_name format a40
12 | column ndv_alg format a30
13 | column inc_stale format a30
14 |
15 | drop table t1 purge;
16 |
17 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
18 | partition by range (num1)
19 | interval (1) (
20 | partition p1 values less than (1)
21 | ,partition p2 values less than (2));
22 |
23 | insert /*+ APPEND */ into t1
24 | select rownum, mod(rownum,5), mod(rownum,1000),'X'||mod(rownum,10000)
25 | from (select 1 from dual connect by level <=1000);
26 |
27 | commit;
28 |
29 | --
30 | -- Enable incremental statistics
31 | --
32 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
33 | --
34 | -- Disallow mixed format
35 | --
36 | exec dbms_stats.set_table_prefs(null,'t1', 'incremental_staleness', 'NULL')
37 |
38 | --
39 | -- Create old-style synopses
40 | --
41 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
42 | exec dbms_stats.gather_table_stats(null,'t1')
43 |
44 | select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
45 | select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
46 |
47 | @t1check
48 |
49 | --
50 | -- Add a new partition and don't make any others stale
51 | --
52 | insert /*+ APPEND */ into t1
53 | select rownum, 5, mod(rownum,1000),'X'||mod(rownum,10000)
54 | from (select 1 from dual connect by level <=1000);
55 |
56 | @t1check
57 |
58 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
59 | select dbms_stats.get_prefs('approximate_ndv_algorithm',user,'t1') ndv_alg from dual;
60 | select dbms_stats.get_prefs('incremental_staleness',user,'t1') inc_stale from dual;
61 | exec dbms_stats.gather_table_stats(null,'t1')
62 |
63 | @t1check
64 |
--------------------------------------------------------------------------------
/new_synopses/04_defaultmx.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 | column partition_name format a40
7 | column ndv_alg format a30
8 | column inc_stale format a30
9 |
10 | drop table t1 purge;
11 | drop table exch purge;
12 |
13 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
14 | partition by range (num1)
15 | interval (1) (
16 | partition p1 values less than (1)
17 | ,partition p2 values less than (2));
18 |
19 | insert /*+ APPEND */ into t1
20 | select rownum, mod(rownum,5), mod(rownum,1000),'X'||mod(rownum,10000)
21 | from (select 1 from dual connect by level <=1000);
22 |
23 | commit;
24 |
25 | create table exch as select * from t1 where 1=-1;
26 |
27 | insert /*+ APPEND */ into exch
28 | select rownum,0,mod(rownum,10000),'X'||mod(rownum,100000)
29 | from (select 1 from dual connect by level <=100);
30 |
31 | commit;
32 |
33 | --
34 | -- Enable incremental statistics
35 | --
36 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
37 | exec dbms_stats.set_table_prefs(null,'exch','incremental','true')
38 |
39 | --
40 | -- Prepare to create a synopsis on the EXCH table
41 | --
42 | exec DBMS_STATS.SET_TABLE_PREFS (null,'exch','INCREMENTAL_LEVEL','table');
43 | --
44 | -- The exchange table has an old-style synopsis
45 | --
46 | exec dbms_stats.set_table_prefs(null,'exch', 'approximate_ndv_algorithm', 'adaptive sampling')
47 | exec dbms_stats.gather_table_stats(null,'exch');
48 |
49 | --
50 | -- The partitioned table has new-style synopses
51 | --
52 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
53 | exec dbms_stats.gather_table_stats(null,'t1')
54 |
55 | @t1check
56 |
57 | pause
58 |
59 | alter table t1 exchange partition p1 with table exch;
60 |
61 | @t1check
62 |
63 | --
64 | -- Add a partition
65 | --
66 | insert /*+ APPEND */ into t1
67 | select rownum, 5, mod(rownum,1000),'X'||mod(rownum,10000)
68 | from (select 1 from dual connect by level <=1000);
69 |
70 | exec dbms_stats.gather_table_stats(null,'t1')
71 |
72 | @t1check
73 |
74 | --
75 | -- Make P1 stale
76 | --
77 | insert /*+ APPEND */ into t1
78 | select rownum, 0, mod(rownum,1000),'X'||mod(rownum,10000)
79 | from (select 1 from dual connect by level <=1000);
80 |
81 | exec dbms_stats.gather_table_stats(null,'t1')
82 |
83 | @t1check
84 |
85 |
86 |
87 |
88 |
--------------------------------------------------------------------------------
/new_synopses/05_defaultmx.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 | column partition_name format a40
7 | column ndv_alg format a30
8 | column inc_stale format a30
9 |
10 | drop table t1 purge;
11 | drop table exch purge;
12 |
13 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
14 | partition by range (num1)
15 | interval (1) (
16 | partition p1 values less than (1)
17 | ,partition p2 values less than (2));
18 |
19 | insert /*+ APPEND */ into t1
20 | select rownum, mod(rownum,5), mod(rownum,1000),'X'||mod(rownum,10000)
21 | from (select 1 from dual connect by level <=1000);
22 |
23 | commit;
24 |
25 | create table exch as select * from t1 where 1=-1;
26 |
27 | insert /*+ APPEND */ into exch
28 | select rownum,0,mod(rownum,10000),'X'||mod(rownum,100000)
29 | from (select 1 from dual connect by level <=100);
30 |
31 | commit;
32 |
33 | --
34 | -- Enable incremental statistics
35 | --
36 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
37 | exec dbms_stats.set_table_prefs(null,'exch','incremental','true')
38 |
39 | --
40 | -- Prepare to create a synopsis on the EXCH table
41 | --
42 | exec DBMS_STATS.SET_TABLE_PREFS (null,'exch','INCREMENTAL_LEVEL','table');
43 | --
44 | -- The exchange table has a new-style synopsis
45 | --
46 | exec dbms_stats.set_table_prefs(null,'exch', 'approximate_ndv_algorithm', 'hyperloglog')
47 | exec dbms_stats.gather_table_stats(null,'exch');
48 |
49 | --
50 | -- The partitioned table has old-style synopses
51 | --
52 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
53 | exec dbms_stats.gather_table_stats(null,'t1')
54 |
55 | @t1check
56 |
57 | pause
58 |
59 | alter table t1 exchange partition p1 with table exch;
60 |
61 | @t1check
62 |
63 | pause
64 |
65 | --
66 | -- Add a partition
67 | --
68 | insert /*+ APPEND */ into t1
69 | select rownum, 5, mod(rownum,1000),'X'||mod(rownum,10000)
70 | from (select 1 from dual connect by level <=1000);
71 |
72 | exec dbms_stats.gather_table_stats(null,'t1')
73 |
74 | @t1check
75 |
76 | pause
77 |
78 | --
79 | -- Make P1 stale
80 | --
81 | insert /*+ APPEND */ into t1
82 | select rownum, 0, mod(rownum,1000),'X'||mod(rownum,10000)
83 | from (select 1 from dual connect by level <=1000);
84 |
85 | exec dbms_stats.gather_table_stats(null,'t1')
86 |
87 | @t1check
88 |
--------------------------------------------------------------------------------
/new_synopses/06_defaultmx.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 | column partition_name format a40
7 | column ndv_alg format a30
8 | column inc_stale format a30
9 |
10 | drop table t1 purge;
11 | drop table exch purge;
12 |
13 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
14 | partition by range (num1)
15 | interval (1) (
16 | partition p1 values less than (1)
17 | ,partition p2 values less than (2));
18 |
19 | insert /*+ APPEND */ into t1
20 | select rownum, mod(rownum,2), mod(rownum,1000),'X'||mod(rownum,10000)
21 | from (select 1 from dual connect by level <=1000);
22 |
23 | commit;
24 |
25 | create table exch as select * from t1 where 1=-1;
26 |
27 | insert /*+ APPEND */ into exch
28 | select rownum,0,mod(rownum,10000),'X'||mod(rownum,100000)
29 | from (select 1 from dual connect by level <=100);
30 |
31 | commit;
32 |
33 | --
34 | -- Enable incremental statistics
35 | --
36 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
37 | exec dbms_stats.set_table_prefs(null,'exch','incremental','true')
38 |
39 | --
40 | -- Prepare to create a synopsis on the EXCH table
41 | --
42 | exec DBMS_STATS.SET_TABLE_PREFS (null,'exch','INCREMENTAL_LEVEL','table');
43 | --
44 | -- The exchange table has a new-style synopsis
45 | --
46 | exec dbms_stats.set_table_prefs(null,'exch', 'approximate_ndv_algorithm', 'hyperloglog')
47 | exec dbms_stats.gather_table_stats(null,'exch');
48 |
49 | --
50 | -- The partitioned table has old-style synopses
51 | --
52 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
53 | exec dbms_stats.gather_table_stats(null,'t1')
54 |
55 | @t1check
56 |
57 | pause
58 |
59 | alter table t1 exchange partition p1 with table exch;
60 |
61 | @t1check
62 |
63 | pause
64 |
65 | --
66 | -- Add a partition
67 | --
68 | insert /*+ APPEND */ into t1
69 | select rownum, 5, mod(rownum,1000),'X'||mod(rownum,10000)
70 | from (select 1 from dual connect by level <=1000);
71 |
72 | exec dbms_stats.gather_table_stats(null,'t1')
73 |
74 | @t1check
75 |
76 | pause
77 |
78 | --
79 | -- Make P1 stale
80 | --
81 | insert /*+ APPEND */ into t1
82 | select rownum, 0, mod(rownum,1000),'X'||mod(rownum,10000)
83 | from (select 1 from dual connect by level <=1000);
84 |
85 | exec dbms_stats.gather_table_stats(null,'t1')
86 |
87 | @t1check
88 |
--------------------------------------------------------------------------------
/new_synopses/07_defaultmx.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 | column partition_name format a40
7 | column ndv_alg format a30
8 | column inc_stale format a30
9 |
10 | drop table t1 purge;
11 | drop table exch purge;
12 |
13 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
14 | partition by range (num1)
15 | interval (1) (
16 | partition p1 values less than (1)
17 | ,partition p2 values less than (2));
18 |
19 | insert /*+ APPEND */ into t1
20 | select rownum, mod(rownum,2), mod(rownum,1000),'X'||mod(rownum,10000)
21 | from (select 1 from dual connect by level <=1000);
22 |
23 | commit;
24 |
25 | create table exch as select * from t1 where 1=-1;
26 |
27 | insert /*+ APPEND */ into exch
28 | select rownum,0,mod(rownum,10000),'X'||mod(rownum,100000)
29 | from (select 1 from dual connect by level <=100);
30 |
31 | commit;
32 |
33 | --
34 | -- Enable incremental statistics
35 | --
36 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
37 | exec dbms_stats.set_table_prefs(null,'exch','incremental','true')
38 |
39 | --
40 | -- Prepare to create a synopsis on the EXCH table
41 | --
42 | exec DBMS_STATS.SET_TABLE_PREFS (null,'exch','INCREMENTAL_LEVEL','table');
43 | --
44 | -- The exchange table has an old-style synopsis
45 | --
46 | exec dbms_stats.set_table_prefs(null,'exch', 'approximate_ndv_algorithm', 'adaptive sampling')
47 | exec dbms_stats.gather_table_stats(null,'exch');
48 |
49 | --
50 | -- The partitioned table has new-style synopses
51 | --
52 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
53 | exec dbms_stats.gather_table_stats(null,'t1')
54 |
55 | @t1check
56 |
57 | pause
58 |
59 | alter table t1 exchange partition p1 with table exch;
60 |
61 | @t1check
62 |
63 | pause
64 |
65 | --
66 | -- Add a partition
67 | --
68 | insert /*+ APPEND */ into t1
69 | select rownum, 2, mod(rownum,1000),'X'||mod(rownum,10000)
70 | from (select 1 from dual connect by level <=1000);
71 |
72 | exec dbms_stats.gather_table_stats(null,'t1')
73 |
74 | @t1check
75 |
76 | pause
77 |
78 | --
79 | -- Make P1 stale
80 | --
81 | insert /*+ APPEND */ into t1
82 | select rownum, 0, mod(rownum,1000),'X'||mod(rownum,10000)
83 | from (select 1 from dual connect by level <=1000);
84 |
85 | exec dbms_stats.gather_table_stats(null,'t1')
86 |
87 | @t1check
88 |
--------------------------------------------------------------------------------
/new_synopses/08_nomix.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 | column partition_name format a40
7 | column ndv_alg format a30
8 | column inc_stale format a30
9 |
10 | drop table t1 purge;
11 | drop table exch purge;
12 |
13 | create table t1 (id number(10),num1 number(10), num2 number(10),txt varchar2(20))
14 | partition by range (num1)
15 | interval (1) (
16 | partition p1 values less than (1)
17 | ,partition p2 values less than (2));
18 |
19 | insert /*+ APPEND */ into t1
20 | select rownum, mod(rownum,2), mod(rownum,1000),'X'||mod(rownum,10000)
21 | from (select 1 from dual connect by level <=1000);
22 |
23 | commit;
24 |
25 | create table exch as select * from t1 where 1=-1;
26 |
27 | insert /*+ APPEND */ into exch
28 | select rownum,0,mod(rownum,10000),'X'||mod(rownum,100000)
29 | from (select 1 from dual connect by level <=100);
30 |
31 | commit;
32 |
33 | --
34 | -- Enable incremental statistics
35 | --
36 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
37 | exec dbms_stats.set_table_prefs(null,'exch','incremental','true')
38 |
39 | --
40 | -- Prepare to create a synopsis on the EXCH table
41 | --
42 | exec DBMS_STATS.SET_TABLE_PREFS (null,'exch','INCREMENTAL_LEVEL','table');
43 | --
44 | -- The exchange table has an old-style synopsis
45 | --
46 | exec dbms_stats.set_table_prefs(null,'exch', 'approximate_ndv_algorithm', 'adaptive sampling')
47 | exec dbms_stats.gather_table_stats(null,'exch');
48 |
49 | --
50 | -- The partitioned table has new-style synopses
51 | -- Also, disallow mixed synopsis formats.
52 | -- Compare the behavior here to the previous example (07_defaultmx)
53 | --
54 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
55 | exec dbms_stats.set_table_prefs(null,'t1', 'incremental_staleness', 'null')
56 | exec dbms_stats.gather_table_stats(null,'t1')
57 |
58 | @t1check
59 |
60 | pause
61 |
62 | alter table t1 exchange partition p1 with table exch;
63 |
64 | @t1check
65 |
66 | pause
67 |
68 | --
69 | -- Add a partition
70 | --
71 | insert /*+ APPEND */ into t1
72 | select rownum, 2, mod(rownum,1000),'X'||mod(rownum,10000)
73 | from (select 1 from dual connect by level <=1000);
74 |
75 | exec dbms_stats.gather_table_stats(null,'t1')
76 |
77 | @t1check
78 |
79 | pause
80 |
81 | --
82 | -- Make P1 stale
83 | --
84 | insert /*+ APPEND */ into t1
85 | select rownum, 0, mod(rownum,1000),'X'||mod(rownum,10000)
86 | from (select 1 from dual connect by level <=1000);
87 |
88 | exec dbms_stats.gather_table_stats(null,'t1')
89 |
90 | @t1check
91 |
92 |
--------------------------------------------------------------------------------
/new_synopses/README.md:
--------------------------------------------------------------------------------
1 | # New synopses for Oracle Database Cloud Services (Oracle Database 12c Release 2)
2 |
3 | These scripts support Part 3 of a 3-part series of blog posts on incremental stats maintenance. Part 1 is here:
4 |
5 | https://blogs.oracle.com/optimizer/entry/efficient_statistics_maintenance_for_partitioned
6 |
7 | You will see how to create new-style synopses and get an indication of their space and performance benefits.
8 |
9 | A DBA account is required if you want to view synopsis data in the data dictionary; otherwise, a non-DBA account is fine.
10 |
11 | The "test" scripts show you the "nuts and bolts" of synopses and demonstrate performance benefits. The other numbered scripts demonstrate some of the different options for controlling synopses. The check.sql script shows you how to see what type of synopsis a partition has.
12 |
13 | ### Note
14 |
15 | All of the scripts are designed to work with Oracle Database 12c Release 2.
16 |
17 | ### DISCLAIMER
18 |
19 | * These scripts are provided for educational purposes only.
20 | * They are NOT supported by Oracle World Wide Technical Support.
21 | * The scripts have been tested and they appear to work as intended.
22 | * You should always run scripts on a test instance.
23 |
24 | ### WARNING
25 |
26 | * These scripts drop and create tables. For use on test databases.
27 |
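28 | ### Quick example
29 |
30 | A minimal sketch of how the synopsis format is selected (assuming a partitioned table called T1, as created by the numbered scripts). The format is controlled by the APPROXIMATE_NDV_ALGORITHM table preference:
31 |
32 | ```sql
33 | -- Incremental statistics must be enabled for synopses to be created
34 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
35 |
36 | -- New-style (HyperLogLog) synopses...
37 | exec dbms_stats.set_table_prefs(null,'t1','approximate_ndv_algorithm','hyperloglog')
38 |
39 | -- ...or old-style (adaptive sampling) synopses
40 | -- exec dbms_stats.set_table_prefs(null,'t1','approximate_ndv_algorithm','adaptive sampling')
41 |
42 | -- Synopses are (re)built when statistics are gathered
43 | exec dbms_stats.gather_table_stats(null,'t1')
44 | ```
45 |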
--------------------------------------------------------------------------------
/new_synopses/check.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- List synopsis types per partition
3 | -- There is an Enhancement Request to include
4 | -- this information in data dictionary views.
5 | --
6 | column partition_name format a40
7 | column synopsis_type format a40
8 | set pagesize 1000
9 | set linesize 500
10 | set trims on
11 | set tab off
12 |
13 | select subobject_name partition_name,
14 | decode(h.spare1,NULL,'ADAPTIVE SAMPLING','HYPERLOGLOG') synopsis_type
15 | from dba_objects o,
16 | sys.wri$_optstat_synopsis_head$ h
17 | where o.object_type = 'TABLE PARTITION'
18 | and o.object_name = UPPER('&table_name')
19 | and o.owner = USER
20 | and h.group# = o.object_id*2
21 | and intcol# = 1
22 | order by partition_name;
23 |
24 |
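25 | --
26 | -- Usage note: run as @check and supply the table name when prompted.
27 | -- The script reads SYS.WRI$_OPTSTAT_SYNOPSIS_HEAD$, so (as the README
28 | -- points out) it needs a DBA or similarly privileged account.
29 | --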
--------------------------------------------------------------------------------
/new_synopses/t1check.sql:
--------------------------------------------------------------------------------
1 | select subobject_name partition_name,
2 | decode(h.spare1,NULL,'ADAPTIVE SAMPLING','HYPERLOGLOG') synopsis_type
3 | from dba_objects o,
4 | sys.wri$_optstat_synopsis_head$ h
5 | where o.object_type = 'TABLE PARTITION'
6 | and o.object_name = 'T1'
7 | and o.owner = USER
8 | and h.group# = o.object_id*2
9 | and intcol# = 1
10 | order by partition_name;
11 |
12 |
13 |
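14 | --
15 | -- Usage note: this is the hard-coded T1 variant of check.sql; the numbered
16 | -- demo scripts call it between steps via @t1check. Like check.sql, it reads
17 | -- SYS.WRI$_OPTSTAT_SYNOPSIS_HEAD$, so it needs a suitably privileged account.
18 | --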
--------------------------------------------------------------------------------
/new_synopses/test1.lst:
--------------------------------------------------------------------------------
1 | SQL> @test1
2 | SQL> set timing on
3 | SQL> set linesize 1000
4 | SQL> set pagesize 100
5 | SQL> set trims on
6 | SQL>
7 | SQL> drop table t1 purge;
8 |
9 | Table dropped.
10 |
11 | Elapsed: 00:00:09.44
12 | SQL>
13 | SQL> --
14 | SQL> -- This is our main application table
15 | SQL> --
16 | SQL> create table t1 (id number(10),num1 number(10), num2 number(10),txt1 varchar2(20), txt2 varchar2(20))
17 | 2 partition by range (num1)
18 | 3 interval (1) (
19 | 4 partition p1 values less than (1)
20 | 5 ,partition p2 values less than (2));
21 |
22 | Table created.
23 |
24 | Elapsed: 00:00:00.02
25 | SQL>
26 | SQL> --
27 | SQL> -- The interval-partitioned table will have a large number of partitions
28 | SQL> -- once the INSERT has completed. Tables with a large number of partitions
29 | SQL> -- have more synopsis data (especially for adaptive sampling), so
30 | SQL> -- differences in the resource cost of managing the two synopsis formats
31 | SQL> -- are easier to see.
32 | SQL> --
33 | SQL> insert /*+ APPEND */ into t1
34 | 2 select rownum, mod(rownum,512), mod(rownum,1000),'X'||mod(rownum,10000),'Y'||mod(rownum,5)
35 | 3 from (select 1 from dual connect by level <=3000),
36 | 4 (select 1 from dual connect by level <=3000);
37 |
38 | 9000000 rows created.
39 |
40 | Elapsed: 00:00:18.04
41 | SQL>
42 | SQL> commit;
43 |
44 | Commit complete.
45 |
46 | Elapsed: 00:00:00.13
47 | SQL>
48 | SQL> drop table exch purge;
49 |
50 | Table dropped.
51 |
52 | Elapsed: 00:00:00.04
53 | SQL>
54 | SQL> --
55 | SQL> -- This is a table we can use for partition exchange load
56 | SQL> --
57 | SQL> create table exch as select * from t1 where 1=-1;
58 |
59 | Table created.
60 |
61 | Elapsed: 00:00:00.02
62 | SQL>
63 | SQL> insert /*+ APPEND */ into exch
64 | 2 select rownum,0,mod(rownum,10000),'X'||mod(rownum,100000),'Y'||mod(rownum,5)
65 | 3 from (select 1 from dual connect by level <=1000),
66 | 4 (select 1 from dual connect by level <=1000);
67 |
68 | 1000000 rows created.
69 |
70 | Elapsed: 00:00:01.27
71 | SQL>
72 | SQL> commit;
73 |
74 | Commit complete.
75 |
76 | Elapsed: 00:00:00.02
77 | SQL>
78 | SQL> --
79 | SQL> -- Enable incremental statistics
80 | SQL> --
81 | SQL> exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
82 |
83 | PL/SQL procedure successfully completed.
84 |
85 | Elapsed: 00:00:00.00
86 | SQL> exec dbms_stats.set_table_prefs(null,'exch','incremental','true')
87 |
88 | PL/SQL procedure successfully completed.
89 |
90 | Elapsed: 00:00:00.00
91 | SQL> exit
92 |
--------------------------------------------------------------------------------
/new_synopses/test1.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Create some test tables
3 | -- WARNING! Use a test system...
4 | -- Tables called T1 and EXCH will be dropped
5 | --
6 | set echo on
7 | set timing on
8 | set linesize 1000
9 | set pagesize 100
10 | set trims on
11 |
12 | drop table t1 purge;
13 |
14 | --
15 | -- This is our main application table
16 | --
17 | create table t1 (id number(10),num1 number(10), num2 number(10),txt1 varchar2(20), txt2 varchar2(20))
18 | partition by range (num1)
19 | interval (1) (
20 | partition p1 values less than (1)
21 | ,partition p2 values less than (2));
22 |
23 | --
24 | -- The interval-partitioned table will have a large number of partitions
25 | -- once the INSERT has completed. Tables with a large number of partitions
26 | -- have more synopsis data (especially for adaptive sampling), so
27 | -- differences in the resource cost of managing the two synopsis formats
28 | -- are easier to see.
29 | --
30 | insert /*+ APPEND */ into t1
31 | select rownum, mod(rownum,512), mod(rownum,1000),'X'||mod(rownum,10000),'Y'||mod(rownum,5)
32 | from (select 1 from dual connect by level <=3000),
33 | (select 1 from dual connect by level <=3000);
34 |
35 | commit;
36 |
37 | drop table exch purge;
38 |
39 | --
40 | -- This is a table we can use for partition exchange load
41 | --
42 | create table exch as select * from t1 where 1=-1;
43 |
44 | insert /*+ APPEND */ into exch
45 | select rownum,0,mod(rownum,10000),'X'||mod(rownum,100000),'Y'||mod(rownum,5)
46 | from (select 1 from dual connect by level <=1000),
47 | (select 1 from dual connect by level <=1000);
48 |
49 | commit;
50 |
51 | --
52 | -- Enable incremental statistics
53 | --
54 | exec dbms_stats.set_table_prefs(null,'t1','incremental','true')
55 | exec dbms_stats.set_table_prefs(null,'exch','incremental','true')
56 |
--------------------------------------------------------------------------------
/new_synopses/test2.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 |
7 | --
8 | -- HyperLogLog synopses
9 | --
10 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
11 | exec dbms_stats.delete_table_stats(null,'t1')
12 | exec dbms_stats.gather_table_stats(null,'t1')
13 |
14 | pause
15 |
16 | --
17 | -- Confirm we have HLL synopses
18 | --
19 | @look
20 |
21 | pause
22 |
23 | --
24 | -- Take a look at synopses for the HyperLogLog algorithm
25 | --
26 | --
27 | -- No rows in this table for T1
28 | --
29 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS$
30 | where bo# = (select object_id from user_objects where object_name = 'T1' and object_type = 'TABLE');
31 |
32 | --
33 | -- Number of rows = #Partitions * #Table Columns
34 | --
35 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
36 | where bo# = (select object_id from user_objects where object_name = 'T1' and object_type = 'TABLE');
37 |
38 | --
39 | -- Binary NDV data for each column per partition
40 | -- Just showing the first few rows...
41 | --
42 | select spare1,spare2 from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
43 | where bo# = (select object_id from user_objects where object_name = 'T1' and object_type = 'TABLE')
44 | and rownum<11;
45 |
46 | pause
47 |
48 | --
49 | -- Adaptive sampling (pre-Oracle Database 12c Release 2)
50 | --
51 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
52 | exec dbms_stats.delete_table_stats(null,'t1')
53 | exec dbms_stats.gather_table_stats(null,'t1')
54 |
55 | --
56 | -- Confirm we have Adaptive Sampling synopses
57 | --
58 | @look
59 |
60 | pause
61 |
62 | --
63 | -- NDV data for the Adaptive Sampling algorithm
64 | -- The number of rows is related to #Partitions, #Columns and NDV per column
65 | --
66 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS$
67 | where bo# = (select object_id from user_objects where object_name = 'T1' and object_type = 'TABLE');
68 |
69 | --
70 | -- Same #rows as HyperLogLog
71 | --
72 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
73 | where bo# = (select object_id from user_objects where object_name = 'T1' and object_type = 'TABLE');
74 |
75 | --
76 | -- No binary NDV data
77 | --
78 | select spare1 ,spare2 from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
79 | where bo# = (select object_id from user_objects where object_name = 'T1' and object_type = 'TABLE')
80 | and rownum<11;
81 |
82 | pause
83 |
84 | -- Ignore this timing
85 | exec dbms_stats.delete_table_stats(null,'t1')
86 |
87 | --
88 | -- Look at the timings for deleting and gathering statistics
89 | -- Adaptive Sampling
90 | --
91 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
92 | exec dbms_stats.gather_table_stats(null,'t1')
93 |
94 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
95 | exec dbms_stats.delete_table_stats(null,'t1')
96 | exec dbms_stats.gather_table_stats(null,'t1')
97 |
98 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
99 | exec dbms_stats.delete_table_stats(null,'t1')
100 | exec dbms_stats.gather_table_stats(null,'t1')
101 |
102 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
103 | exec dbms_stats.delete_table_stats(null,'t1')
104 | exec dbms_stats.gather_table_stats(null,'t1')
105 |
106 | -- Ignore this timing
107 | exec dbms_stats.delete_table_stats(null,'t1')
108 |
109 | --
110 | -- Compare these timings with the previous timings for deleting and gathering statistics
111 | -- HyperLogLog
112 | --
113 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
114 | exec dbms_stats.gather_table_stats(null,'t1')
115 |
116 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
117 | exec dbms_stats.delete_table_stats(null,'t1')
118 | exec dbms_stats.gather_table_stats(null,'t1')
119 |
120 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
121 | exec dbms_stats.delete_table_stats(null,'t1')
122 | exec dbms_stats.gather_table_stats(null,'t1')
123 |
124 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
125 | exec dbms_stats.delete_table_stats(null,'t1')
126 | exec dbms_stats.gather_table_stats(null,'t1')
127 |
128 |
--------------------------------------------------------------------------------
/new_synopses/test3.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 |
7 | --
8 | -- We are going to perform a partition exchange load and
9 | -- take a look at the timings for HyperLogLog synopses.
10 | --
11 | exec dbms_stats.set_table_prefs(null,'t1','incremental_staleness', 'NULL')
12 |
13 | exec DBMS_STATS.SET_TABLE_PREFS (null,'exch','INCREMENTAL_LEVEL','table');
14 | exec dbms_stats.set_table_prefs(null,'exch', 'approximate_ndv_algorithm', 'hyperloglog')
15 | exec dbms_stats.delete_table_stats(null,'exch')
16 | exec dbms_stats.gather_table_stats(null,'exch');
17 |
18 | --
19 | -- The exchange table has synopses
20 | --
21 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS$
22 | where bo# = (select object_id from user_objects where object_name = 'EXCH' and object_type = 'TABLE');
23 |
24 | --
25 | -- The exchange table has 5 columns...
26 | --
27 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
28 | where bo# = (select object_id from user_objects where object_name = 'EXCH' and object_type = 'TABLE');
29 |
30 | select spare1,spare2 from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
31 | where bo# = (select object_id from user_objects where object_name = 'EXCH' and object_type = 'TABLE')
32 | and rownum<11;
33 |
34 | pause
35 |
36 | --
37 | -- Get stats up to date on our main table, T1
38 | --
39 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'hyperloglog')
40 | exec dbms_stats.delete_table_stats(null,'t1')
41 | exec dbms_stats.gather_table_stats(null,'t1')
42 |
43 | pause
44 |
45 | --
46 | -- Perform the exchange - we expect it to be very fast
47 | --
48 | alter table t1 exchange partition p1 with table exch;
49 |
50 | pause
51 |
52 | --
53 | -- Gather stats to refresh the global-level table stats
54 | -- We expect this to be very fast because the synopsis is used
55 | --
56 | exec dbms_stats.gather_table_stats(null,'t1')
57 |
58 | pause
59 |
60 | --
61 | -- Confirm we have HyperLogLog synopses...
62 | --
63 | @look
64 |
65 | pause
66 |
67 | --
68 | -- Note these timings for HYPERLOGLOG
69 | -- Compare the timings with the next set of tests
70 | --
71 | alter table t1 exchange partition p1 with table exch;
72 | alter table t1 exchange partition p1 with table exch;
73 | alter table t1 exchange partition p1 with table exch;
74 | alter table t1 exchange partition p1 with table exch;
75 | --
76 | -- This last exchange and stats gather returns T1 to the state it
77 | -- was in prior to the first exchange.
78 | --
79 | alter table t1 exchange partition p1 with table exch;
80 | exec dbms_stats.gather_table_stats(null,'t1')
81 |
--------------------------------------------------------------------------------
/new_synopses/test4.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | set timing on
3 | set linesize 1000
4 | set pagesize 100
5 | set trims on
6 |
7 | --
8 | -- We are going to perform a partition exchange load and
9 | -- take a look at the timings for Adaptive Sampling synopses.
10 | -- Compare the results here with "test3".
11 | --
12 | exec dbms_stats.set_table_prefs(null,'t1','incremental_staleness', 'NULL')
13 |
14 | exec DBMS_STATS.SET_TABLE_PREFS (null,'exch','INCREMENTAL_LEVEL','table');
15 | exec dbms_stats.set_table_prefs(null,'exch', 'approximate_ndv_algorithm', 'adaptive sampling')
16 | exec dbms_stats.delete_table_stats(null,'exch')
17 | exec dbms_stats.gather_table_stats(null,'exch');
18 |
19 | --
20 | -- The exchange table has synopses
21 | --
22 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS$
23 | where bo# = (select object_id from user_objects where object_name = 'EXCH' and object_type = 'TABLE');
24 |
25 | --
26 | -- The exchange table has 5 columns...
27 | --
28 | select count(*) from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
29 | where bo# = (select object_id from user_objects where object_name = 'EXCH' and object_type = 'TABLE');
30 |
31 | select spare1,spare2 from sys.WRI$_OPTSTAT_SYNOPSIS_HEAD$
32 | where bo# = (select object_id from user_objects where object_name = 'EXCH' and object_type = 'TABLE')
33 | and rownum<11;
34 |
35 | pause
36 |
37 | --
38 | -- Get stats up to date on our main table, T1
39 | --
40 | exec dbms_stats.set_table_prefs(null,'t1', 'approximate_ndv_algorithm', 'adaptive sampling')
41 | exec dbms_stats.delete_table_stats(null,'t1')
42 | exec dbms_stats.gather_table_stats(null,'t1')
43 |
44 | pause
45 |
46 | --
47 | -- Perform the exchange - we expect it to be fast, but not
48 | -- as fast as HyperLogLog because we must manipulate more
49 | -- synopsis data in this case
50 | --
51 | alter table t1 exchange partition p1 with table exch;
52 |
53 | pause
54 |
55 | --
56 | -- Gather stats to refresh the global-level table stats
57 | -- We expect this to be very fast because the synopsis is used
58 | --
59 | exec dbms_stats.gather_table_stats(null,'t1')
60 |
61 | pause
62 |
63 | --
64 | -- Confirm we have Adaptive Sampling synopses...
65 | --
66 | @look
67 |
68 | pause
69 |
70 | --
71 | -- Note these timings for Adaptive Sampling
72 | -- Compare the timings with "test3"
73 | --
74 | alter table t1 exchange partition p1 with table exch;
75 | alter table t1 exchange partition p1 with table exch;
76 | alter table t1 exchange partition p1 with table exch;
77 | alter table t1 exchange partition p1 with table exch;
78 | --
79 | -- This last exchange and stats gather returns T1 to the state it
80 | -- was in prior to the first exchange.
81 | --
82 | alter table t1 exchange partition p1 with table exch;
83 | exec dbms_stats.gather_table_stats(null,'t1')
84 |
--------------------------------------------------------------------------------
/partition_exchange_load/README.md:
--------------------------------------------------------------------------------
1 | # Partition Exchange Loading
2 |
3 | These scripts support Part 2 of a 3-part series of blog posts on partition exchange loading. Part 1 is here:
4 |
5 | https://blogs.oracle.com/optimizer/entry/efficient_statistics_maintenance_for_partitioned
6 |
7 | They are primarily designed with Oracle Database 12c in mind, but they will largely work in Oracle Database 11g, except that the steps to pre-create synopses will fail and the post-exchange stats gathering for the new partition cannot be avoided.
8 |
9 | ### [example.sql](https://github.com/oracle/dw-vldb/tree/master/partition_exchange_load/example.sql)
10 |
11 | This script shows how partition exchange load interacts with incremental statistics for a subpartitioned main table.
12 |
13 | ### [list_s.sql](https://github.com/oracle/dw-vldb/tree/master/partition_exchange_load/list_s.sql)
14 |
15 | This script lists extended statistics and histograms for a partitioned table.
16 |
17 | ### [check_hist.sql](https://github.com/oracle/dw-vldb/tree/master/partition_exchange_load/check_hist.sql)
18 |
19 | This script compares histograms at the table level with histograms for each (sub)partition. Mismatches between the two levels are not a problem and are not uncommon if histograms are created automatically (using 'FOR ALL COLUMNS SIZE AUTO'). Nevertheless, it's useful to be aware of them if you have been managing histograms and partition statistics in an ad-hoc manner, because the differences might not have been your intention.
20 |
21 | ### Note
22 |
23 | All of the scripts are designed to work with Oracle Database 12c.
24 |
25 | ### DISCLAIMER
26 |
27 | * These scripts are provided for educational purposes only.
28 | * They are NOT supported by Oracle World Wide Technical Support.
29 | * The scripts have been tested and they appear to work as intended.
30 | * You should always run scripts on a test instance.
31 |
32 | ### WARNING
33 |
34 | * These scripts drop and create tables. For use on test databases.
35 |
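36 | ### The pattern in brief
37 |
38 | A condensed sketch of the flow that example.sql walks through (the table and partition names here are illustrative):
39 |
40 | ```sql
41 | -- Main table: maintain statistics (and, in 12c, a synopsis) per partition
42 | exec dbms_stats.set_table_prefs(null,'main_tab','incremental','true')
43 | exec dbms_stats.set_table_prefs(null,'main_tab','incremental_level','partition')
44 |
45 | -- Load table: build table-level statistics and a synopsis before the exchange
46 | exec dbms_stats.set_table_prefs(null,'load_tab','incremental','true')
47 | exec dbms_stats.set_table_prefs(null,'load_tab','incremental_level','table')
48 | exec dbms_stats.gather_table_stats(null,'load_tab')
49 |
50 | -- The exchange carries the statistics across with the data, so the
51 | -- post-exchange gather only has to refresh the global-level statistics
52 | alter table main_tab exchange partition p1 with table load_tab;
53 | exec dbms_stats.gather_table_stats(null,'main_tab')
54 | ```
55 |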
--------------------------------------------------------------------------------
/partition_exchange_load/check_hist.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT Check histograms for all (sub)partitions
3 | PROMPT ======================================================================================
4 | set serveroutput on
5 | set verify off
6 | set feedback off
7 | set linesize 1000
8 | set trims on
9 | set serveroutput on
10 | column column_name format a40
11 | column table_name format a40
12 | column partition_name format a40
13 | column subpartition_name format a40
14 |
15 | accept ptable prompt 'Enter the name of the partitioned table: '
16 |
17 | prompt
18 | prompt Table-level histograms:
19 | prompt
20 | select column_name
21 | from user_tab_col_statistics
22 | where histogram != 'NONE'
23 | and table_name = upper('&ptable')
24 | order by column_name;
25 |
26 | prompt
27 | prompt Partition columns that have histograms not present at table-level:
28 | prompt
29 | break on partition_name
30 | select partition_name,column_name
31 | from user_part_col_statistics
32 | where histogram != 'NONE'
33 | and table_name = upper('&ptable')
34 | and column_name not in (select column_name
35 | from user_tab_col_statistics
36 | where histogram is not null
37 | and histogram != 'NONE'
38 | and table_name = upper('&ptable'))
39 | order by partition_name,column_name;
40 | clear breaks
41 |
42 | prompt
43 | prompt Subpartition columns that have histograms not present at table-level:
44 | prompt
45 | break on subpartition_name
46 | select subpartition_name,column_name
47 | from user_subpart_col_statistics
48 | where histogram != 'NONE'
49 | and table_name = upper('&ptable')
50 | and column_name not in (select column_name
51 | from user_tab_col_statistics
52 | where histogram is not null
53 | and histogram != 'NONE'
54 | and table_name = upper('&ptable'))
55 | order by subpartition_name,column_name;
56 | clear breaks
57 |
58 | prompt
59 | prompt Partition columns missing histograms that exist at table-level:
60 | prompt
61 | break on partition_name
62 | select partition_name, column_name
63 | from user_part_col_statistics
64 | where histogram = 'NONE'
65 | and table_name = upper('&ptable')
66 | and column_name in (select column_name
67 | from user_tab_col_statistics
68 | where histogram is not null
69 | and histogram != 'NONE'
70 | and table_name = upper('&ptable'))
71 | order by partition_name,column_name;
72 | clear breaks
73 |
74 | prompt
75 | prompt Subpartition columns missing histograms that exist at table-level:
76 | prompt
77 | break on subpartition_name
78 | select subpartition_name, column_name
79 | from user_subpart_col_statistics
80 | where histogram = 'NONE'
81 | and table_name = upper('&ptable')
82 | and column_name in (select column_name
83 | from user_tab_col_statistics
84 | where histogram is not null
85 | and histogram != 'NONE'
86 | and table_name = upper('&ptable'))
87 | order by subpartition_name,column_name;
88 | clear breaks
89 |
--------------------------------------------------------------------------------
/partition_exchange_load/example.sql:
--------------------------------------------------------------------------------
1 | -- ===================================================================================
2 | -- Complete example of a partition exchange load using incremental stats
3 | -- and a subpartitioned main table.
4 | -- ===================================================================================
5 | set linesize 2000
6 | set trims on
7 | set pagesize 50
8 | set echo on
9 |
10 | drop table range_list_main_tab;
11 | drop table list_part_load_tab;
12 |
13 | --
14 | -- interval range-list table
15 | --
16 | create table range_list_main_tab
17 | (num number,
18 | ten number)
19 | partition by range (num)
20 | interval (1000)
21 | subpartition by list (ten)
22 | subpartition template
23 | (subpartition t_spart1 values (0,2,4,6,8),
24 | subpartition t_spart2 values (1,3,5,7,9))
25 | (partition range_list_main_part1 values less than (1000),
26 | partition range_list_main_part2 values less than (2000));
27 |
28 | create index range_list_main_tab_n on range_list_main_tab(num) local;
29 |
30 | --
31 | -- list partitioned table
32 | --
33 | create table list_part_load_tab
34 | (num number,
35 | ten number)
36 | partition by list (ten)
37 | (partition list_part_load_part1 values (0,2,4,6,8),
38 | partition list_part_load_part2 values (1,3,5,7,9));
39 |
40 | exec dbms_stats.set_table_prefs(null, 'range_list_main_tab', 'incremental', 'true');
41 | exec dbms_stats.set_table_prefs(null, 'list_part_load_tab', 'incremental', 'true');
42 | exec dbms_stats.set_table_prefs(null, 'range_list_main_tab', 'incremental_level', 'partition');
43 | exec dbms_stats.set_table_prefs(null, 'list_part_load_tab', 'incremental_level', 'table');
44 |
45 | --
46 | -- The main table will have 998 rows
47 | --
48 | insert into range_list_main_tab
49 | select rownum,mod(rownum,10)
50 | from dual
51 | connect by level<500
52 | union all
53 | select rownum+1000,mod(rownum,10)
54 | from dual
55 | connect by level<500;
56 |
57 | --
58 | -- The load table will have 999 rows
59 | --
60 | insert into list_part_load_tab
61 | select rownum,mod(rownum,10)
62 | from dual
63 | connect by level<1000;
64 |
65 | exec dbms_stats.gather_table_stats(null, 'range_list_main_tab');
66 |
67 | --
68 | -- Let's sleep here to give the main table and load table
69 | -- different last_analyzed times
70 | --
71 | host sleep 5
72 |
73 | exec dbms_stats.gather_table_stats(null, 'list_part_load_tab');
74 |
75 | --
76 | -- Should be 998 rows
77 | --
78 | select count(*) from range_list_main_tab;
79 |
80 | select to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') table_ana
81 | from user_tables
82 | where table_name = upper('range_list_main_tab');
83 |
84 | select partition_name, to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') part_ana
85 | from user_tab_partitions
86 | where table_name = upper('range_list_main_tab')
87 | order by partition_position;
88 |
89 | select subpartition_name, to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') subpart_ana
90 | from user_tab_subpartitions
91 | where table_name = upper('range_list_main_tab')
92 | order by subpartition_name;
93 |
94 | select to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') load_table_ana
95 | from user_tables
96 | where table_name = upper('list_part_load_tab');
97 |
98 | select partition_name, to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') load_part_ana
99 | from user_tab_partitions
100 | where table_name = upper('list_part_load_tab')
101 | order by partition_position;
102 |
103 |
104 | --
105 | -- Perform the exchange after a delay
106 | --
107 | host sleep 5
108 | alter table range_list_main_tab
109 | exchange partition range_list_main_part1
110 | with table list_part_load_tab;
111 |
112 | --
113 | -- Exchange complete at:
114 | --
115 | select to_char(sysdate,'dd-mon-yyyy hh24:mi:ss') exchange_complete
116 | from dual;
117 |
118 | exec dbms_stats.gather_table_stats(null, 'range_list_main_tab');
119 |
120 | --
121 | -- Should now be 1498 rows
122 | --
123 | select count(*) from range_list_main_tab;
124 |
125 | --
126 | -- The time shown here will be the most recent because the global
127 | -- statistics must be updated after the partition has been exchanged.
128 | -- So, expect the time to be similar to the exchange completion time.
129 | --
130 | select to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') table_ana
131 | from user_tables
132 | where table_name = upper('range_list_main_tab');
133 |
134 | --
135 | -- Part 1 statistics were gathered earlier, because they came from the load
136 | -- table. They did not have to be regathered after the partition was exchanged.
137 | -- Part 2 statistics have not been regathered - there is no need.
138 | --
139 | select partition_name, to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') part_ana
140 | from user_tab_partitions
141 | where table_name = upper('range_list_main_tab')
142 | order by partition_position;
143 |
144 | --
145 | -- The Part 1 subpartition stats came from the load table so they have not
146 | -- been regathered after the exchange.
147 | -- Part 2 subpartition stats have not been regathered - there is no need.
148 | --
149 | select subpartition_name, to_char(last_analyzed,'dd-mon-yyyy hh24:mi:ss') subpart_ana
150 | from user_tab_subpartitions
151 | where table_name = upper('range_list_main_tab')
152 | order by subpartition_name;
153 |
154 |
--------------------------------------------------------------------------------
/partition_exchange_load/list_s.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT List extended statistics and histograms for a partitioned table
3 | PROMPT ======================================================================================
4 | set serveroutput on
5 | set verify off
6 | set feedback off
7 | set linesize 1000
8 | set pagesize 1000
9 | set trims on
10 | set serveroutput on
11 | column column_name format a40
12 | column extension format a100
13 |
14 |
15 | accept ptable prompt 'Enter the name of the partitioned table: '
16 |
17 | prompt Extended statistics on table...
18 | select extension,
19 | creator created_by
20 | from user_stat_extensions
21 | where table_name = upper('&ptable');
22 |
23 | prompt Histograms at table level...
24 | select column_name
25 | from user_tab_col_statistics
26 | where histogram != 'NONE'
27 | and table_name = upper('&ptable');
28 |
29 | prompt All columns with histograms at partition level...
30 | select distinct column_name
31 | from user_part_col_statistics
32 | where histogram != 'NONE'
33 | and table_name = upper('&ptable');
34 |
35 | prompt All columns with histograms at subpartition level...
36 | select distinct column_name
37 | from user_subpart_col_statistics
38 | where histogram != 'NONE'
39 | and table_name = upper('&ptable');
40 |
--------------------------------------------------------------------------------
/px_wm_wp/README.md:
--------------------------------------------------------------------------------
1 | Parallel Execution and Workload Management Examples
2 |
3 | Some scripts for setting up and monitoring parallel execution and Database Resource Manager in an EDW.
4 |
5 | These scripts are in a very early state, so they have not been fully tested.
6 |
7 | DISCLAIMER:
8 |
9 | -- These scripts are provided for educational purposes only.
10 | -- They are NOT supported by Oracle World Wide Technical Support.
11 | -- The scripts have been tested and they appear to work as intended.
12 | -- You should always run scripts on a test instance.
13 |
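14 | Example: logon.sql assumes that consumer groups called ADHOC and CRITICAL
15 | already exist and are mapped to users. A minimal sketch of creating such a
16 | mapping (the user name REPORT_USER is illustrative):
17 |
18 | begin
19 |   dbms_resource_manager.create_pending_area;
20 |   dbms_resource_manager.set_consumer_group_mapping(
21 |     attribute      => dbms_resource_manager.oracle_user,
22 |     value          => 'REPORT_USER',
23 |     consumer_group => 'ADHOC');
24 |   dbms_resource_manager.submit_pending_area;
25 | end;
26 | /
27 |
28 | exec dbms_resource_manager_privs.grant_switch_consumer_group('REPORT_USER','ADHOC',FALSE)
29 |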
--------------------------------------------------------------------------------
/px_wm_wp/logon.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- DISCLAIMER:
3 | -- This script is provided for educational purposes only. It is
4 | -- NOT supported by Oracle World Wide Technical Support.
5 | -- The script has been tested and appears to work as intended.
6 | -- You should always run new scripts initially
7 | -- on a test instance.
8 | --
9 | -- Script Version 0.1 - TEST
10 | --
11 | --
12 | CREATE OR REPLACE TRIGGER px_logon_trigger
13 | AFTER LOGON
14 | ON DATABASE
15 | DECLARE
16 | mapped_cgroup_count number;
17 | BEGIN
18 | SELECT COUNT(*)
19 | INTO mapped_cgroup_count
20 | FROM dba_rsrc_group_mappings
21 | WHERE attribute = 'ORACLE_USER'
22 | AND value = USER
23 | AND consumer_group IN ('ADHOC', 'CRITICAL')
24 | AND status IS NULL;
25 |
26 | IF mapped_cgroup_count > 0
27 | THEN
28 | EXECUTE IMMEDIATE 'ALTER SESSION SET parallel_degree_policy = ''AUTO''';
29 | END IF;
30 | END px_logon_trigger;
31 | /
32 |
33 |
--------------------------------------------------------------------------------
/px_wm_wp/px_dbrm_queries.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- DISCLAIMER:
3 | -- This script is provided for educational purposes only. It is
4 | -- NOT supported by Oracle World Wide Technical Support.
5 | -- The script has been tested and appears to work as intended.
6 | -- You should always run new scripts initially
7 | -- on a test instance.
8 | --
9 | -- Script Version 0.1 - TEST
10 | --
11 | --
12 | --
13 | -- Parallel statement queuing summary per consumer group
14 | --
15 | select name AS "Consumer Group"
16 | ,ROUND(DECODE(sum(PQS_COMPLETED),0,0,
17 | sum(PQ_QUEUED_TIME)/1000/sum(PQS_COMPLETED)),2)
18 | AS "Total Q Time Per Prl Exec"
19 | ,ROUND(DECODE(sum(PQS_COMPLETED),0,0,
20 | sum(PQS_QUEUED)/sum(PQS_COMPLETED))*100,2)
21 | AS "Percent Prl Stmts Queued"
22 | ,SUM(CURRENT_PQS_QUEUED) AS "Current Prl Stmts Queued"
23 | ,SUM(CURRENT_PQS_ACTIVE) AS "Current Prl Stmts Active"
24 | ,SUM(CURRENT_PQ_SERVERS_ACTIVE) AS "Current PX Servers Active"
25 | ,SUM(IDLE_SESSIONS_KILLED) AS "Sessions Killed"
26 | ,SUM(SQL_CANCELED) AS "SQL Cancelled"
27 | from gv$rsrc_consumer_group
28 | group by name
29 | order by name;
30 |
31 | --
32 | -- Minute-by-minute history of parallel statement activity per consumer group
33 | --
34 | SELECT TO_CHAR(ROUND(begin_time,'MI'), 'HH:MI') AS "Time"
35 | ,consumer_group_name AS "Consumer Group"
36 | ,SUM(AVG_ACTIVE_PARALLEL_STMTS) AS "Ave Active Parallel Stmts"
37 | ,SUM(AVG_QUEUED_PARALLEL_STMTS) AS "Ave Queued"
38 | ,SUM(AVG_ACTIVE_PARALLEL_SERVERS) AS "Ave Active"
39 | FROM gv$rsrcmgrmetric_history
40 | GROUP BY ROUND(begin_time,'MI'),
41 | consumer_group_name
42 | ORDER BY ROUND(begin_time,'MI'),
43 | consumer_group_name;
44 |
45 | --
46 | -- Minute-by-minute average number of sessions running on CPU and
47 | -- waiting for CPU, per consumer group
48 | --
49 | SELECT TO_CHAR(ROUND(begin_time,'MI'), 'HH:MI')
50 | AS "Time"
51 | ,consumer_group_name AS "Consumer Group"
52 | ,ROUND(sum(CPU_CONSUMED_TIME) / 60000,2)
53 | AS "Average Num Running Sessions"
54 | ,ROUND(sum(cpu_wait_time) / 60000,2)
55 | AS "Average Num Waiting Sessions"
56 | FROM gv$rsrcmgrmetric_history
57 | GROUP BY ROUND(begin_time,'MI'),
58 | consumer_group_name
59 | ORDER BY ROUND(begin_time,'MI')
60 | ,consumer_group_name;
61 |
62 | --
63 | -- Requested versus actual DOP for currently-executing parallel statements
64 | --
65 | SELECT req_degree||' -> '||degree AS "Requested DOP -> Actual DOP"
66 | ,count(distinct qcsid) AS "Number Executing"
67 | FROM gv$px_session
68 | WHERE req_degree IS NOT NULL
69 | GROUP BY req_degree||'->'||degree;
70 |
71 | --
72 | -- Cumulative counts of parallel operations (including downgrades)
73 | --
74 | SELECT name
75 | ,value
76 | FROM gv$sysstat
77 | WHERE UPPER (name) LIKE '%PARALLEL OPERATIONS%'
78 | ORDER BY name;
79 |
80 | --
81 | -- PX servers requested versus allocated for monitored statements
82 | --
83 | SELECT inst_id
84 | ,sql_text
85 | ,username
86 | ,status
87 | ,px_servers_requested
88 | ,px_servers_allocated
89 | FROM gv$sql_monitor
90 | ORDER BY last_refresh_time;
91 |
92 | --
93 | -- Busy PX servers per instance
94 | --
95 | select inst_id iid
96 | ,count(*) n
97 | from gv$pq_slave
98 | where status = 'BUSY'
99 | group by inst_id
100 | order by 1;
101 |
102 | --
103 | -- DOP downgrade reason codes (X$ view; requires SYS access)
104 | --
105 | select qksxareasons
106 | ,indx
107 | FROM x$qksxa_reason
108 | WHERE QKSXAREASONS LIKE '%DOP downgrade%';
109 |
110 |
--------------------------------------------------------------------------------
/spm/README.md:
--------------------------------------------------------------------------------
1 | SQL Plan Management Examples
2 |
3 | Some scripts for trying out SQL Plan Management.
4 |
5 | Just a few right now in the "inmem" folder, but expect more in the coming weeks.
6 |
7 | DISCLAIMER:
8 |
9 | -- These scripts are provided for educational purposes only.
10 | -- They are NOT supported by Oracle World Wide Technical Support.
11 | -- The scripts have been tested and they appear to work as intended.
12 | -- You should always run scripts on a test instance.
13 |
--------------------------------------------------------------------------------
/spm/inmem/README.md:
--------------------------------------------------------------------------------
1 | SQL Plan Management Examples
2 |
3 | Some scripts for trying out SQL Plan Management with Oracle Database In-Memory.
4 |
5 | Tested on Oracle Database 12c, version 12.1.0.2.
6 |
7 | They are intended to supplement parts 1 to 3 of a series of blog posts on SPM and Oracle Database In-Memory.
8 |
9 | Example "lst" files are provided so that you can see the expected output. Beware that "drop_base.sql" will drop ALL EXISTING SQL Plan Baselines. Use test instances only.
10 |
11 | DISCLAIMER:
12 |
13 | -- These scripts are provided for educational purposes only.
14 | -- They are NOT supported by Oracle World Wide Technical Support.
15 | -- The scripts have been tested and they appear to work as intended.
16 | -- You should always run scripts on a test instance.
17 |
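18 | A quick way to confirm that a statement is using a baseline (this is what
19 | part2/sql.sql does) is to check V$SQL after running one of the test queries:
20 |
21 | select sql_text, sql_plan_baseline from v$sql
22 | where sql_text like '%SPM%';
23 |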
--------------------------------------------------------------------------------
/spm/inmem/part1/all.sql:
--------------------------------------------------------------------------------
1 | --@drop_base
2 |
3 | @tab
4 | @auto
5 | @query
6 | @query
7 | @autoo
8 | @base
9 | @query
10 | @plan
11 | @in
12 | @query
13 | @query
14 | @plan
15 | @base
16 |
--------------------------------------------------------------------------------
/spm/inmem/part1/auto.sql:
--------------------------------------------------------------------------------
1 | alter system set optimizer_capture_sql_plan_baselines=TRUE scope=memory;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part1/autoo.sql:
--------------------------------------------------------------------------------
1 | alter system set optimizer_capture_sql_plan_baselines=FALSE scope=memory;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part1/base.sql:
--------------------------------------------------------------------------------
1 | set linesize 150
2 | column sql_text format a100
3 | SELECT sql_text,enabled, accepted
4 | FROM dba_sql_plan_baselines
5 | WHERE sql_text LIKE '%SPM%';
6 |
--------------------------------------------------------------------------------
/spm/inmem/part1/drop_base.sql:
--------------------------------------------------------------------------------
1 | DECLARE
2 | l_plans_dropped PLS_INTEGER;
3 | BEGIN
4 |
5 | FOR REC IN (SELECT DISTINCT SQL_HANDLE FROM DBA_SQL_PLAN_BASELINES)
6 | LOOP
7 | L_PLANS_DROPPED := DBMS_SPM.DROP_SQL_PLAN_BASELINE (
8 | sql_handle => rec.sql_handle,
9 | PLAN_NAME => NULL);
10 | END LOOP;
11 |
12 | END;
13 | /
14 |
--------------------------------------------------------------------------------
/spm/inmem/part1/in.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE mysales INMEMORY;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part1/noin.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE mysales NO INMEMORY;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part1/plan.sql:
--------------------------------------------------------------------------------
1 | SET LINESIZE 130
2 | SET PAGESIZE 500
3 | SELECT * FROM table(DBMS_XPLAN.DISPLAY_CURSOR);
4 |
--------------------------------------------------------------------------------
/spm/inmem/part1/query.sql:
--------------------------------------------------------------------------------
1 | SELECT /* SPM */ COUNT(*)
2 | FROM mysales
3 | WHERE val = 'X';
4 |
--------------------------------------------------------------------------------
/spm/inmem/part1/seg.sql:
--------------------------------------------------------------------------------
1 | SELECT segment_name,
2 | bytes,
3 | inmemory_size
4 | FROM v$im_segments;
5 |
6 |
--------------------------------------------------------------------------------
/spm/inmem/part1/tab.sql:
--------------------------------------------------------------------------------
1 | DROP table mysales;
2 |
3 | CREATE TABLE mysales (
4 | id NUMBER(10)
5 | ,val VARCHAR2(100));
6 |
7 | INSERT INTO mysales
8 | SELECT ROWNUM,'X'
9 | FROM ( SELECT 1
10 | FROM dual
11 | CONNECT BY LEVEL <= 100000
12 | );
13 |
14 | COMMIT;
15 |
16 | EXECUTE dbms_stats.gather_table_stats(ownname=>NULL,tabname=>'MYSALES');
17 |
--------------------------------------------------------------------------------
/spm/inmem/part2/base.sql:
--------------------------------------------------------------------------------
1 | set linesize 150
2 | column sql_text format a50
3 | column plan_name format a35
4 | column sql_handle format a20
5 | SELECT sql_handle,plan_name,sql_text,enabled, accepted
6 | FROM dba_sql_plan_baselines
7 | WHERE sql_text LIKE '%SPM%';
8 |
--------------------------------------------------------------------------------
/spm/inmem/part2/drop_base.sql:
--------------------------------------------------------------------------------
1 | DECLARE
2 | l_plans_dropped PLS_INTEGER;
3 | BEGIN
4 |
5 | FOR REC IN (SELECT DISTINCT SQL_HANDLE FROM DBA_SQL_PLAN_BASELINES)
6 | LOOP
7 | L_PLANS_DROPPED := DBMS_SPM.DROP_SQL_PLAN_BASELINE (
8 | sql_handle => rec.sql_handle,
9 | PLAN_NAME => NULL);
10 | END LOOP;
11 |
12 | END;
13 | /
14 |
--------------------------------------------------------------------------------
/spm/inmem/part2/flush.sql:
--------------------------------------------------------------------------------
1 | alter system flush shared_pool;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part2/in.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE mysales INMEMORY;
2 | ALTER SYSTEM FLUSH shared_pool;
3 |
--------------------------------------------------------------------------------
/spm/inmem/part2/load.sql:
--------------------------------------------------------------------------------
1 | DECLARE
2 | my_plans pls_integer;
3 | BEGIN
4 | my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(
5 | attribute_name => 'sql_text',
6 | attribute_value => '%SPM%');
7 | END;
8 | /
9 |
--------------------------------------------------------------------------------
/spm/inmem/part2/noin.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE mysales NO INMEMORY;
2 | ALTER SYSTEM flush shared_pool;
3 |
--------------------------------------------------------------------------------
/spm/inmem/part2/plan.sql:
--------------------------------------------------------------------------------
1 | set tab off
2 | SET LINESIZE 250
3 | SET PAGESIZE 500
4 | SELECT * FROM table(DBMS_XPLAN.DISPLAY_CURSOR());
5 |
--------------------------------------------------------------------------------
/spm/inmem/part2/seg.sql:
--------------------------------------------------------------------------------
1 | SELECT segment_name,
2 | bytes,
3 | inmemory_size
4 | FROM v$im_segments;
5 |
6 |
--------------------------------------------------------------------------------
/spm/inmem/part2/show.sql:
--------------------------------------------------------------------------------
1 | SELECT PLAN_TABLE_OUTPUT
2 | FROM
3 | TABLE(
4 | DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE('&sql_handle','&plan_name','basic')
5 | ) t
6 | /
7 |
--------------------------------------------------------------------------------
/spm/inmem/part2/sql.sql:
--------------------------------------------------------------------------------
1 | select sql_text,is_shareable,is_bind_aware,child_number,sql_plan_baseline from v$sql
2 | where sql_text like '%SPM%';
3 |
--------------------------------------------------------------------------------
/spm/inmem/part2/t1.sql:
--------------------------------------------------------------------------------
1 | set tab off
2 | var val number
3 |
4 | exec :val := -1
5 | prompt val=-1
6 | select /* SPM */ count(*),sum(val) from mysales where sale_type = :val;
7 | @plan
8 |
9 | exec :val := 1
10 | prompt val=1
11 | select /* SPM */ count(*),sum(val) from mysales where sale_type = :val;
12 | @plan
13 |
--------------------------------------------------------------------------------
/spm/inmem/part2/tab.sql:
--------------------------------------------------------------------------------
1 | set linesize 200
2 | set tab off
3 | set pagesize 200
4 | drop table mysales;
5 |
6 | create table mysales (
7 | id number(10)
8 | ,sale_type number(10)
9 | ,txt varchar2(100)
10 | ,val number(10))
11 | /
12 |
13 | begin
14 | for i in 1..1500
15 | loop
16 | insert into mysales values (i,i,'X',i);
17 | end loop;
18 | commit;
19 | end;
20 | /
21 |
22 | begin
23 | for i in 1..20000
24 | loop
25 | insert into mysales values (i+1500,1,'XXXXXXXXXXXXXX',i);
26 | end loop;
27 | commit;
28 | end;
29 | /
30 |
31 | begin
32 | for i in 1..2000000
33 | loop
34 | insert into mysales values (i+50000,-1,'XXXXXXXXXXX',i);
35 | end loop;
36 | commit;
37 | end;
38 | /
39 |
40 | create index si on mysales(sale_type);
41 |
42 | execute dbms_stats.gather_table_stats(ownname=>null,tabname=>'MYSALES',method_opt=>'for columns sale_type');
43 |
44 | SELECT COLUMN_NAME, NOTES, HISTOGRAM
45 | FROM USER_TAB_COL_STATISTICS
46 | WHERE TABLE_NAME = 'MYSALES';
47 |
--------------------------------------------------------------------------------
/spm/inmem/part3/auto.sql:
--------------------------------------------------------------------------------
1 | alter system set optimizer_capture_sql_plan_baselines=TRUE scope=memory;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part3/autoo.sql:
--------------------------------------------------------------------------------
1 | alter system set optimizer_capture_sql_plan_baselines=FALSE scope=memory;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part3/base.sql:
--------------------------------------------------------------------------------
1 | set linesize 200
2 | column sql_text format a80
3 | column sql_handle format a20
4 | column plan_name format a35
5 | SELECT plan_name,sql_handle,sql_text,enabled, accepted
6 | FROM dba_sql_plan_baselines
7 | WHERE sql_text LIKE '%SPM%';
8 |
--------------------------------------------------------------------------------
/spm/inmem/part3/drop_base.sql:
--------------------------------------------------------------------------------
1 | DECLARE
2 | l_plans_dropped PLS_INTEGER;
3 | BEGIN
4 |
5 | FOR REC IN (SELECT DISTINCT SQL_HANDLE FROM DBA_SQL_PLAN_BASELINES)
6 | LOOP
7 | L_PLANS_DROPPED := DBMS_SPM.DROP_SQL_PLAN_BASELINE (
8 | sql_handle => rec.sql_handle,
9 | PLAN_NAME => NULL);
10 | END LOOP;
11 |
12 | END;
13 | /
14 |
--------------------------------------------------------------------------------
/spm/inmem/part3/evo2.sql:
--------------------------------------------------------------------------------
1 | set serveroutput on
2 | DECLARE
3 | cVal CLOB;
4 | BEGIN
5 | cVal := dbms_spm.evolve_sql_plan_baseline(sql_handle=>'&sql_handle',verify=>'NO');
6 | dbms_output.put_line(cVal);
7 | END;
8 | /
9 | set serveroutput off
10 |
--------------------------------------------------------------------------------
/spm/inmem/part3/flush.sql:
--------------------------------------------------------------------------------
1 | alter system flush shared_pool;
2 |
--------------------------------------------------------------------------------
/spm/inmem/part3/in.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE mysales INMEMORY;
2 | ALTER SYSTEM FLUSH shared_pool;
3 |
--------------------------------------------------------------------------------
/spm/inmem/part3/noin.sql:
--------------------------------------------------------------------------------
1 | ALTER TABLE mysales NO INMEMORY;
2 | ALTER SYSTEM flush shared_pool;
3 |
--------------------------------------------------------------------------------
/spm/inmem/part3/plan.sql:
--------------------------------------------------------------------------------
1 | set tab off
2 | set trims on
3 | SET LINESIZE 250
4 | SET PAGESIZE 500
5 | SELECT * FROM table(DBMS_XPLAN.DISPLAY_CURSOR);
6 |
--------------------------------------------------------------------------------
/spm/inmem/part3/q3.sql:
--------------------------------------------------------------------------------
1 | set tab off
2 | var val number
3 |
4 | select /* SPM */ count(*),sum(val) from mysales where sale_type in (2,3);
5 | @plan
6 |
--------------------------------------------------------------------------------
/spm/inmem/part3/seg.sql:
--------------------------------------------------------------------------------
1 | SELECT segment_name,
2 | bytes,
3 | inmemory_size
4 | FROM v$im_segments;
5 |
6 |
--------------------------------------------------------------------------------
/spm/inmem/part3/show.sql:
--------------------------------------------------------------------------------
1 | SELECT PLAN_TABLE_OUTPUT
2 | FROM V$SQL s, DBA_SQL_PLAN_BASELINES b,
3 | TABLE(
4 | DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE(b.sql_handle,b.plan_name,'basic')
5 | ) t
6 | WHERE s.EXACT_MATCHING_SIGNATURE=b.SIGNATURE
7 | AND b.PLAN_NAME='&plan_name';
8 |
--------------------------------------------------------------------------------
/spm/inmem/part3/tab.sql:
--------------------------------------------------------------------------------
1 | set linesize 200
2 | set tab off
3 | set pagesize 200
4 | drop table mysales;
5 |
6 | create table mysales (
7 | id number(10)
8 | ,sale_type number(10)
9 | ,txt varchar2(100)
10 | ,val number(10))
11 | /
12 |
13 | begin
14 | for j in 1..20
15 | loop
16 | for i in 1..10000
17 | loop
18 | insert into mysales values ((i+200)*j,j,'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',i);
19 | end loop;
20 | end loop;
21 | commit;
22 | end;
23 | /
24 |
25 | create index si on mysales(sale_type);
26 |
27 | execute dbms_stats.gather_table_stats(ownname=>null,tabname=>'MYSALES');
28 |
--------------------------------------------------------------------------------
/spm_in_practice/README.md:
--------------------------------------------------------------------------------
1 | Utility Scripts for Using SPM in Practice
2 |
3 | The example.sql script demonstrates the scripts in "util". You will need privileges
4 | to create an Oracle user: the script creates a user called "spmtest" for you, so be
5 | sure to use a test environment when you experiment with this for the first time.
6 |
7 | Example output is shown in example.lst.
8 |
9 | For background, check out https://blogs.oracle.com/optimizer/entry/how_to_use_sql_plan
10 |
11 | DISCLAIMER:
12 | -- These scripts are provided for educational purposes only.
13 | -- They are NOT supported by Oracle World Wide Technical Support.
14 | -- The scripts have been tested and they appear to work as intended.
15 | -- You should always run scripts on a test instance.
16 |
17 |
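A minimal way to run the demo, assuming a DBA account on a test instance
(example.sql creates and connects to the "spmtest" user for you):

    $ sqlplus / as sysdba
    SQL> @example

Follow the PAUSE prompts and compare your output with example.lst.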
--------------------------------------------------------------------------------
/spm_in_practice/bplan.sql:
--------------------------------------------------------------------------------
1 | SELECT *
2 | FROM TABLE(DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE('&sql_handle',NULL)) t;
3 |
4 |
--------------------------------------------------------------------------------
/spm_in_practice/con:
--------------------------------------------------------------------------------
1 | sqlplus spmtest/spmtest
2 |
--------------------------------------------------------------------------------
/spm_in_practice/example.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 | spool example
3 |
4 | alter system flush shared_pool;
5 |
6 | @user
7 |
8 | PROMPT
9 | PROMPT **** It is easy to drop SQL plan baselines for a specific schema...
10 | PROMPT
11 | @util/dropu SPMTEST
12 | @table
13 | @q1
14 |
15 | PROMPT **** Capture our query
16 |
17 | @load
18 |
19 | PROMPT **** Display the SQL Plan Baselines
20 |
21 | @util/listu SPMTEST
22 |
23 | PROMPT **** Press to continue...
24 | PAUSE
25 |
26 | PROMPT **** Display the plan for our SQL plan baseline
27 |
28 | var handle varchar2(50)
29 | begin
30 | select sql_handle into :handle from dba_sql_plan_baselines where parsing_schema_name = 'SPMTEST';
31 | end;
32 | /
33 |
34 | SELECT *
35 | FROM TABLE(DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE(:handle,NULL)) t;
36 |
37 | PROMPT **** Note that the index is used in the execution plan
38 | PROMPT
39 | PROMPT **** Press to continue...
40 | PAUSE
41 |
42 | PROMPT **** Execute the query again and confirm that it is using the SQL plan baseline
43 |
44 | @q1
45 | @util/plan
46 |
47 | PROMPT **** Note (above) that the SQL plan baseline is being used
48 | PROMPT
49 | PROMPT **** Press to continue...
50 | PAUSE
51 |
52 | PROMPT **** We do not expect to see non-matching SQL execution plans...
53 |
54 | @util/nomatchu SPMTEST
55 |
56 | PROMPT **** Query above returns no rows because all our queries
57 | PROMPT **** with SQL plan baselines are using them
58 | PROMPT **** Press to continue...
59 | PAUSE
60 |
61 | PROMPT **** Drop the index
62 |
63 | DROP INDEX tabi;
64 |
65 | PROMPT **** Press to continue...
66 | PAUSE
67 |
68 | PROMPT **** Execute the query again and confirm that it is NOT using the baselines
69 | PROMPT because the plan cannot be used - the index has gone
70 |
71 | @q1
72 | @q1
73 | @util/plan
74 |
75 | PROMPT **** Note (above) that the SQL plan baseline is NOT being used
76 | PROMPT **** because the index has gone.
77 | PROMPT **** Press to continue...
78 | PAUSE
79 |
80 | var planname varchar2(100)
81 | begin
82 | select plan_name into :planname from dba_sql_plan_baselines where parsing_schema_name = 'SPMTEST' and accepted = 'YES';
83 | end;
84 | /
85 |
86 | column hint format a100
87 | SELECT extractValue(value(h),'.') AS hint
88 | FROM sys.sqlobj$plan od,
89 | TABLE(xmlsequence(
90 | extract(xmltype(od.other_xml),'/*/outline_data/hint'))) h
91 | WHERE od.other_xml is not null
92 | AND (signature,category,obj_type,plan_id) = (select signature,
93 | category,
94 | obj_type,
95 | plan_id
96 | from sys.sqlobj$ so
97 | where so.name = :planname);
98 |
99 | PROMPT **** Above - the SQL plan baseline outline hints include an INDEX hint for the index we dropped.
100 | PROMPT **** The query is no longer able to obey this hint.
101 | PROMPT **** Press to continue...
102 | PAUSE
103 |
104 | PROMPT **** We now expect to find our problem query...
105 | @util/nomatchu SPMTEST
106 |
107 | PROMPT **** Above, we can see that a SQL statement with a SQL plan baseline
108 | PROMPT **** is not using the SQL plan baseline. In this case, it's because
109 | PROMPT **** we dropped the index so the accepted SQL plan baseline cannot be used.
110 | PROMPT **** Press to continue...
111 | PAUSE
112 |
113 | PROMPT **** We have captured a new plan in SQL plan history...
114 | @util/listu SPMTEST
115 | PROMPT **** Above, there are now two SQL plan history entries for our query. The new plan has not been accepted yet.
116 | PROMPT **** We can choose to evolve it if we wish and then the query will be under the control of SPM.
117 | PROMPT **** Press to continue...
118 | PAUSE
119 |
120 | PROMPT **** The query will show up as a candidate for evolution
121 | @util/evou SPMTEST
122 | PROMPT **** Above, we have identified a SQL plan history entry that is a candidate for evolving.
123 | PROMPT **** Press to continue...
124 | PAUSE
125 |
126 | PROMPT **** Evolve our SQL plan history entry...
127 | DECLARE
128 | ret CLOB;
129 | BEGIN
130 | ret := DBMS_SPM.EVOLVE_SQL_PLAN_BASELINE(sql_handle=>:handle, verify=>'NO');
131 | END;
132 | /
133 | PROMPT **** Press to continue...
134 | PAUSE
135 |
136 | @util/evou SPMTEST
137 | PROMPT **** Above, there are no longer candidates for evolving.
138 | PROMPT **** Press to continue...
139 | PAUSE
140 |
141 | @util/listu SPMTEST
142 | PROMPT **** Above, all our SQL plan baselines are accepted
143 | PROMPT **** Press to continue...
144 | PAUSE
145 |
146 | @q1
147 | @q1
148 | @q1
149 | @util/plan
150 |
151 | PROMPT **** Above, our query is using a SQL plan baseline again.
152 | PROMPT **** It's a full table scan this time because there is no index.
153 | PROMPT **** Press to continue...
154 | PAUSE
155 |
156 | @util/nomatchu SPMTEST
157 |
158 | PROMPT **** Now all the queries with SQL plan baselines are matching
159 | PROMPT **** successfully, so the above query returns no rows.
160 | PROMPT **** Press to continue...
161 | PAUSE
162 |
163 | spool off
164 |
--------------------------------------------------------------------------------
/spm_in_practice/load.sql:
--------------------------------------------------------------------------------
1 | DECLARE
2 | my_plans pls_integer;
3 | BEGIN
4 | my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(
5 | attribute_name => 'SQL_TEXT'
6 | ,attribute_value => '%MYSPMTEST%');
7 | END;
8 | /
9 |
--------------------------------------------------------------------------------
/spm_in_practice/q1.sql:
--------------------------------------------------------------------------------
1 | select /* MYSPMTEST */ count(*) from tab1 where id = 100;
2 |
--------------------------------------------------------------------------------
/spm_in_practice/table.sql:
--------------------------------------------------------------------------------
1 | DROP TABLE tab1 purge;
2 |
3 | CREATE TABLE tab1 (id, txt)
4 | AS SELECT level, CAST (to_char(level) AS varchar2(10)) FROM dual
5 | CONNECT BY level <= 10000
6 | /
7 |
8 | CREATE UNIQUE INDEX tabi ON tab1(id);
9 |
10 | EXEC dbms_stats.gather_table_stats(USER, 'tab1');
11 |
12 |
--------------------------------------------------------------------------------
/spm_in_practice/user.sql:
--------------------------------------------------------------------------------
1 | connect / as sysdba
2 |
3 | create user spmtest identified by spmtest;
4 | grant dba to spmtest;
5 |
6 | connect spmtest/spmtest
7 |
--------------------------------------------------------------------------------
/spm_in_practice/util/dropu.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM Drop SQL plan baselines for a specified user
3 | REM
4 |
5 | DECLARE
6 |   l_plans_dropped PLS_INTEGER;
7 | BEGIN
8 |   FOR rec IN (SELECT DISTINCT sql_handle FROM dba_sql_plan_baselines WHERE parsing_schema_name = '&1')
9 |   LOOP
10 |     l_plans_dropped := DBMS_SPM.DROP_SQL_PLAN_BASELINE (
11 |       sql_handle => rec.sql_handle,
12 |       plan_name  => NULL);
13 |   END LOOP;
14 |
15 | END;
16 | /
17 |
--------------------------------------------------------------------------------
/spm_in_practice/util/evo.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM Query SQL plan history for evolve candidates
3 | REM
4 |
5 | SELECT sql_text,
6 | signature,
7 | sql_handle,
8 | plan_name
9 | FROM dba_sql_plan_baselines
10 | WHERE enabled = 'YES'
11 | AND accepted = 'NO';
12 |
--------------------------------------------------------------------------------
/spm_in_practice/util/evou.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM Query SQL plan history for evolve candidates
3 | REM for a specified user
4 | REM
5 |
6 | SELECT sql_text,
7 | signature,
8 | sql_handle,
9 | plan_name
10 | FROM dba_sql_plan_baselines
11 | WHERE enabled = 'YES'
12 | AND accepted = 'NO'
13 | AND parsing_schema_name = '&1';
14 |
--------------------------------------------------------------------------------
/spm_in_practice/util/list.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM List SQL plan baselines
3 | REM
4 |
5 | set linesize 200
6 | set trims on
7 | set tab off
8 | column last_executed format a30
9 | column sql_text format a65
10 |
11 | select plan_name,sql_handle,sql_text, accepted, autopurge, last_executed, executions,ROWS_PROCESSED
12 | from dba_sql_plan_baselines
13 | order by last_executed
14 | /
15 |
--------------------------------------------------------------------------------
/spm_in_practice/util/listu.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM List SQL plan baselines for a specified user
3 | REM
4 |
5 | set linesize 200
6 | set trims on
7 | set tab off
8 | column last_executed format a30
9 | column sql_text format a40
10 | column sql_handle format a40
11 | column plan_name format a35
12 | column signature format 999999999999999999999
13 |
14 | select signature,plan_name,sql_handle,sql_text, accepted, enabled
15 | from dba_sql_plan_baselines
16 | where parsing_schema_name = '&1'
17 | /
18 |
--------------------------------------------------------------------------------
/spm_in_practice/util/noact.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM Identify "top SQL" where a SQL plan baseline exists
3 | REM but it is not active.
4 | REM
5 | REM This might not be intentional!
6 | REM
7 |
8 | SELECT sql_text,
9 | cpu_time,
10 | buffer_gets,
11 | executions,
12 | parsing_schema_name,
13 | exact_matching_signature
14 | FROM v$sql v
15 | WHERE executions>0
16 | AND sql_plan_baseline IS NULL
17 | AND parsing_schema_name != 'SYS'
18 | AND EXISTS (SELECT 1
19 | FROM dba_sql_plan_baselines
20 | WHERE signature = v.exact_matching_signature)
21 | AND NOT EXISTS (SELECT 1
22 | FROM dba_sql_plan_baselines
23 | WHERE signature = v.exact_matching_signature
24 | AND accepted = 'YES'
25 | AND enabled = 'YES')
26 | ORDER BY cpu_time;
27 |
28 |
--------------------------------------------------------------------------------
/spm_in_practice/util/nomatch.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM List SQL statements in the cursor cache that are not
3 | REM using a SQL plan baseline even though one exists.
4 | REM
5 | REM You may want to investigate these cases because they imply
6 | REM that a change may be preventing the plan in the
7 | REM SQL plan baseline from being used.
8 | REM
9 |
10 | set linesize 200
11 | set trims on
12 | set tab off
13 | column sql_text format a50
14 | column parsing_schema_name format a30
15 | column exact_matching_signature format 99999999999999999999
16 |
17 | SELECT sql_text,
18 | cpu_time,
19 | buffer_gets,
20 | executions,
21 | parsing_schema_name,
22 | sql_id,
23 | exact_matching_signature
24 | FROM v$sql v
25 | WHERE executions>0
26 | AND sql_plan_baseline IS NULL
27 | AND EXISTS (SELECT 1
28 | FROM dba_sql_plan_baselines
29 | WHERE signature = v.exact_matching_signature
30 | AND accepted = 'YES'
31 | AND enabled = 'YES')
32 | ORDER BY cpu_time;
33 |
--------------------------------------------------------------------------------
/spm_in_practice/util/nomatchu.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM List SQL statements in the cursor cache that are not
3 | REM using a SQL plan baseline even though one exists.
4 | REM
5 | REM You may want to investigate these cases because they imply
6 | REM that a change may be preventing the plan in the
7 | REM SQL plan baseline from being used.
8 | REM
9 | REM This query allows you to specify a particular schema.
10 | REM
11 |
12 | set linesize 200
13 | set trims on
14 | set tab off
15 | column sql_text format a50
16 | column parsing_schema_name format a30
17 | column exact_matching_signature format 99999999999999999999
18 |
19 | SELECT sql_text,
20 | cpu_time,
21 | buffer_gets,
22 | executions,
23 | parsing_schema_name,
24 | sql_id,
25 | exact_matching_signature
26 | FROM v$sql v
27 | WHERE executions>0
28 | AND sql_plan_baseline IS NULL
29 | AND EXISTS (SELECT 1
30 | FROM dba_sql_plan_baselines
31 | WHERE signature = v.exact_matching_signature
32 | AND accepted = 'YES'
33 | AND enabled = 'YES'
34 | AND parsing_schema_name = '&1')
35 | ORDER BY cpu_time;
36 |
--------------------------------------------------------------------------------
/spm_in_practice/util/plan.sql:
--------------------------------------------------------------------------------
1 | set linesize 200
2 | set tab off
3 | set pagesize 1000
4 | column plan_table_output format a180
5 |
6 | SELECT *
7 | FROM table(DBMS_XPLAN.DISPLAY_CURSOR(FORMAT=>'LAST'));
8 |
--------------------------------------------------------------------------------
/spm_in_practice/util/spmhint.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM Show the outline hints for a SQL plan baseline
3 | REM
4 |
5 | column hint format a100
6 |
7 | SELECT extractValue(value(h),'.') AS hint
8 | FROM sys.sqlobj$plan od,
9 | TABLE(xmlsequence(
10 | extract(xmltype(od.other_xml),'/*/outline_data/hint'))) h
11 | WHERE od.other_xml is not null
12 | AND (signature,category,obj_type,plan_id) = (select signature,
13 | category,
14 | obj_type,
15 | plan_id
16 | from sys.sqlobj$ so
17 | where so.name = '&plan_name');
18 |
--------------------------------------------------------------------------------
/spm_in_practice/util/top.sql:
--------------------------------------------------------------------------------
1 | REM
2 | REM Identify "top SQL" not using a SQL plan baseline.
3 | REM You may wish to modify it to focus on other metrics like
4 | REM executions or IO.
5 | REM
6 |
7 | SELECT sql_text,
8 | cpu_time,
9 | buffer_gets,
10 | executions,
11 | parsing_schema_name,
12 | sql_id,
13 | exact_matching_signature
14 | FROM v$sql
15 | WHERE sql_plan_baseline IS NULL
16 | AND executions>0
17 | AND parsing_schema_name != 'SYS'
18 | ORDER BY cpu_time;
19 |
20 |
--------------------------------------------------------------------------------
/spm_on_11g/README.md:
--------------------------------------------------------------------------------
1 | SQL Plan Management Examples
2 |
3 | Some scripts on the subject of SQL Plan Management in Oracle 11gR2. I have included spooled output in "lst" files so you can see how the scripts can be used. View in order: "introduction", "evolve" and "sts".
4 |
5 |
6 | DISCLAIMER:
7 | -- These scripts are provided for educational purposes only.
8 | -- They are NOT supported by Oracle World Wide Technical Support.
9 | -- The scripts have been tested and they appear to work as intended.
10 | -- You should always run scripts on a test instance.
11 |
12 | WARNING:
13 | -- "drop.sql" will drop all SQL Plan Baselines
14 |
15 |
16 |
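A rough order of play for the scripts in this directory (a sketch only; the
spooled "lst" files show complete worked sessions):

    SQL> @user     -- create the demo ADHOC account (test instances only)
    SQL> @tab      -- create the skewed SALES table, index and histogram
    SQL> @q1       -- run the demo query a number of times
    SQL> @loadc    -- capture the cached plans as SQL plan baselines
    SQL> @base     -- list the SQL plan baselines
    SQL> @evo      -- evolve any unaccepted plans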
--------------------------------------------------------------------------------
/spm_on_11g/base.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- List SQL plan baselines
3 | --
4 | column enabled format a10
5 | column sql_text format a70
6 | column plan_name format a40
7 | column sql_handle format a25
8 | column accepted format a10
9 | set linesize 200
10 | SELECT sql_text,sql_handle, plan_name, enabled, accepted
11 | FROM dba_sql_plan_baselines
12 | order by sql_handle
13 | /
14 |
--------------------------------------------------------------------------------
/spm_on_11g/con:
--------------------------------------------------------------------------------
1 | sqlplus adhoc/adhoc
2 |
--------------------------------------------------------------------------------
/spm_on_11g/drop.sql:
--------------------------------------------------------------------------------
1 | --
2 | --
3 | -- Drop ALL baselines!!
4 | --
5 | --
6 | DECLARE
7 |   l_plans_dropped PLS_INTEGER;
8 | BEGIN
9 |
10 |   FOR rec IN (SELECT DISTINCT sql_handle FROM dba_sql_plan_baselines)
11 |   LOOP
12 |     l_plans_dropped := DBMS_SPM.DROP_SQL_PLAN_BASELINE (
13 |       sql_handle => rec.sql_handle,
14 |       plan_name  => NULL);
15 |   END LOOP;
16 |
17 | END;
18 | /
19 |
--------------------------------------------------------------------------------
/spm_on_11g/evo.sql:
--------------------------------------------------------------------------------
1 | set linesize 10000
2 | set trims on
3 |
4 | set serveroutput on
5 | DECLARE
6 | cVal CLOB;
7 | BEGIN
8 | cVal := dbms_spm.evolve_sql_plan_baseline();
9 | dbms_output.put_line(cVal);
10 | END;
11 | /
12 |
13 | set linesize 250
14 |
--------------------------------------------------------------------------------
/spm_on_11g/flush.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Clean out the shared sql area to "start again"
3 | --
4 | alter system flush shared_pool;
5 |
--------------------------------------------------------------------------------
/spm_on_11g/loadc.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Create SQL plan baselines from SQL in the shared pool
3 | -- - containing the string "SPM_DEMO"
4 | --
5 | DECLARE
6 | my_plans pls_integer;
7 | BEGIN
8 | my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(
9 | attribute_name => 'SQL_TEXT', attribute_value => '%SPM_DEMO%');
10 | END;
11 | /
12 |
--------------------------------------------------------------------------------
/spm_on_11g/loads.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Load baselines from a SQL Tuning Set
3 | --
4 | DECLARE
5 | my_plans pls_integer;
6 | BEGIN
7 | my_plans := DBMS_SPM.LOAD_PLANS_FROM_SQLSET('my_workload');
8 | END;
9 | /
10 |
--------------------------------------------------------------------------------
/spm_on_11g/make_tab.sql:
--------------------------------------------------------------------------------
1 | drop table sales;
2 |
3 | create table sales (
4 | id number(10)
5 | ,txt varchar2(100)
6 | ,val number(10))
7 | /
8 |
9 | begin
10 | for i in 1..20
11 | loop
12 | insert into sales values (i,'XX',i);
13 | end loop;
14 | commit;
15 | end;
16 | /
17 |
18 | begin
19 | for i in 1..600000
20 | loop
21 | insert into sales values (-1,'XX',i);
22 | end loop;
23 | commit;
24 | end;
25 | /
26 |
27 | create index si on sales(id);
28 |
29 | execute dbms_stats.gather_table_stats(ownname=>null,tabname=>'SALES',method_opt=>'for all indexed columns size 254');
30 |
31 |
--------------------------------------------------------------------------------
/spm_on_11g/makeset.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Loads SQL from shared pool into a SQL tuning set
3 | -- Note: Requires Diagnostic and Tuning Pack Licence
4 | --
5 | BEGIN
6 | DBMS_SQLTUNE.DROP_SQLSET( sqlset_name => 'my_workload' );
7 | END;
8 | /
9 |
10 | BEGIN
11 | DBMS_SQLTUNE.CREATE_SQLSET(
12 | sqlset_name => 'my_workload',
13 | description => 'My workload');
14 | END;
15 | /
16 |
17 | DECLARE
18 | c_sqlarea_cursor DBMS_SQLTUNE.SQLSET_CURSOR;
19 | BEGIN
20 | OPEN c_sqlarea_cursor FOR
21 | SELECT VALUE(p)
22 | FROM TABLE(
23 | DBMS_SQLTUNE.SELECT_CURSOR_CACHE(
24 | ' parsing_schema_name = ''ADHOC'' AND sql_text like ''%SPM_DEMO%'' ')
25 | ) p;
26 | -- load the tuning set
27 | DBMS_SQLTUNE.LOAD_SQLSET (
28 | sqlset_name => 'my_workload'
29 | , populate_cursor => c_sqlarea_cursor
30 | );
31 | END;
32 | /
33 |
34 | COLUMN SQL_TEXT FORMAT a30
35 | COLUMN SCH FORMAT a3
36 | COLUMN ELAPSED FORMAT 999999999
37 |
38 | column sql_text format a80
39 | set pagesize 1000
40 | SELECT SQL_ID, SQL_TEXT,
41 | ELAPSED_TIME AS "ELAPSED", BUFFER_GETS
42 | FROM TABLE( DBMS_SQLTUNE.SELECT_SQLSET( 'my_workload' ) ) ;
43 |
--------------------------------------------------------------------------------
/spm_on_11g/p.sql:
--------------------------------------------------------------------------------
1 | set linesize 130
2 | set pagesize 1000
3 | SELECT *
4 | FROM TABLE(DBMS_XPLAN.DISPLAY_CURSOR());
5 |
--------------------------------------------------------------------------------
/spm_on_11g/plan.sql:
--------------------------------------------------------------------------------
1 | set linesize 130
2 | set pagesize 1000
3 | SELECT *
4 | FROM TABLE(DBMS_XPLAN.DISPLAY_CURSOR('&sql_id',&child));
5 |
--------------------------------------------------------------------------------
/spm_on_11g/q1.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 |
3 | var val number
4 |
5 | exec :val := -1
6 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
7 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
8 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
9 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
10 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
11 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
12 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
13 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
14 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
15 | @p
16 |
--------------------------------------------------------------------------------
/spm_on_11g/q2.sql:
--------------------------------------------------------------------------------
1 | set echo on
2 |
3 | var val number
4 |
5 | exec :val := 1
6 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
7 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
8 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
9 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
10 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
11 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
12 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
13 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
14 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
15 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
16 | select /* SPM_DEMO */ sum(val) from sales where id = :val;
17 | @p
18 |
--------------------------------------------------------------------------------
/spm_on_11g/q3.sql:
--------------------------------------------------------------------------------
1 | select /* SPM_DEMO */ max(val) from sales;
2 | select /* SPM_DEMO */ max(val) from sales;
3 | select /* SPM_DEMO */ max(val) from sales;
4 | select /* SPM_DEMO */ max(val) from sales;
5 | select /* SPM_DEMO */ max(val) from sales;
6 | select /* SPM_DEMO */ max(val) from sales;
7 | select /* SPM_DEMO */ max(val) from sales;
8 |
9 | @p
10 |
--------------------------------------------------------------------------------
/spm_on_11g/show.sql:
--------------------------------------------------------------------------------
1 | column PLAN_TABLE_OUTPUT format a180
2 | SELECT *
3 | FROM TABLE(
4 | DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE('&sql_handle','&plan_name','basic')
5 | ) t
6 | /
7 |
--------------------------------------------------------------------------------
/spm_on_11g/sig.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- List SQL plan baselines and their signatures
3 | --
4 | column enabled format a10
5 | column sql_text format a70
6 | column plan_name format a40
7 | column sql_handle format a25
8 | column accepted format a10
9 | column signature format 999999999999999999999999
10 | set linesize 200
11 | SELECT sql_text,sql_handle, plan_name, signature
12 | FROM dba_sql_plan_baselines
13 | order by sql_handle
14 | /
15 |
--------------------------------------------------------------------------------
/spm_on_11g/sql.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Take a look at the shared SQL areas cached in the shared pool
3 | --
4 | --
5 | set trims on
6 | set linesize 250
7 | column sql_text format a70
8 | column is_bind_sensitive format a20
9 | column is_bind_aware format a20
10 | column is_shareable format a20
11 | select sql_id,child_number,is_shareable,sql_text, executions,
12 | is_bind_sensitive, is_bind_aware
13 | from v$sql
14 | where sql_text like '%sales%' order by 1, child_number;
15 |
--------------------------------------------------------------------------------
/spm_on_11g/tab.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Create a SALES table with data skew
3 | -- and force the creation of a histogram
4 | --
5 | drop table sales;
6 |
7 | create table sales (
8 | id number(10)
9 | ,txt varchar2(100)
10 | ,val number(10))
11 | /
12 |
13 | begin
14 | for i in 1..20
15 | loop
16 | insert into sales values (i,'XX',i);
17 | end loop;
18 | commit;
19 | end;
20 | /
21 |
22 | begin
23 | for i in 1..600000
24 | loop
25 | insert into sales values (-1,'XX',i);
26 | end loop;
27 | commit;
28 | end;
29 | /
30 |
31 | create index si on sales(id);
32 |
33 | execute dbms_stats.gather_table_stats(ownname=>null,tabname=>'SALES',method_opt=>'for all indexed columns size 254');
34 |
35 |
--------------------------------------------------------------------------------
/spm_on_11g/user.sql:
--------------------------------------------------------------------------------
1 | connect / as sysdba
2 | create user adhoc identified by adhoc;
3 | grant dba to adhoc;
4 |
--------------------------------------------------------------------------------
/tpcds_etabs/README.md:
--------------------------------------------------------------------------------
1 | Make external tables to load TPC-DS data
2 |
3 | Run tpcds_makex.sql in your TPC-DS schema and it will spool a new script called tpcds_etabs.sql. This spooled script will create a bunch of external tables enabling you to load the TPC-DS files generated by dsdgen.
4 |
5 | The example tpcds_etabs.sql script works for TPC-DS v1.4. For other versions, just run tpcds_makex.sql to create a new tpcds_etabs.sql file.
6 |
7 | If you leave the script unedited, you just need to create a symbolic link "/tmp/tpcdsload" pointing to the directory containing all of your dsdgen files.
8 |
9 | The external tables are named "X_tpcds_table_name" - so take care if you have any tables called "X_something" as these might be dropped when you run tpcds_etabs.sql.
10 |
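For example, on Linux you might create the link with "ln -s /path/to/dsdgen/output /tmp/tpcdsload". The DDL spooled by tpcds_makex.sql takes roughly the following shape; this is an illustrative sketch only, and the "-- ..." markers stand for the remaining columns that the real script emits in full:

```sql
create or replace directory tpcds_load_dir as '/tmp/tpcdsload';

drop table X_CALL_CENTER;
create table X_CALL_CENTER (
  CC_CALL_CENTER_SK NUMBER(38)
 ,CC_CALL_CENTER_ID CHAR(16)
 ,CC_REC_START_DATE DATE
 -- ...
)
organization external (type oracle_loader default directory tpcds_load_dir access parameters (
RECORDS DELIMITED BY NEWLINE
FIELDS TERMINATED BY '|'
(
  CC_CALL_CENTER_SK
 ,CC_CALL_CENTER_ID
 ,CC_REC_START_DATE date "YYYY-MM-DD"
 -- ...
)) location ('call_center.dat')
);
```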
--------------------------------------------------------------------------------
/tpcds_etabs/tpcds_makex.sql:
--------------------------------------------------------------------------------
1 | set serveroutput on
2 | set feedback off
3 | set echo off
4 | set termout off
5 |
6 | spool tpcds_etabs.sql
7 |
8 | declare
9 | sep char(1);
10 | datat user_tab_columns.data_type%type;
11 | begin
12 | dbms_output.put_line('create or replace directory tpcds_load_dir as ''/tmp/tpcdsload'';');
13 | for tabl in (select table_name from user_tables where table_name not in (select table_name from user_external_tables) and substr(table_name,1,2) <> 'S_' order by 1)
14 | loop
15 | dbms_output.put_line('-------------------------------------------------------------------------------');
16 | dbms_output.put_line('drop table X_'||tabl.table_name||';');
17 | dbms_output.put_line('create table X_'||tabl.table_name||' (');
18 | sep := ' ';
19 | for tabc in (select * from user_tab_columns where table_name = tabl.table_name order by column_id)
20 | loop
21 | datat := tabc.data_type;
22 | if (datat = 'CHAR' or datat = 'VARCHAR2')
23 | then
24 | datat := datat||'('||tabc.data_length||')';
25 | end if;
26 | if (datat = 'NUMBER')
27 | then
28 | if (tabc.data_precision is null)
29 | then
30 | datat := datat||'(38)';
31 | else
32 | datat := datat||'('||tabc.data_precision||','||tabc.data_scale||')';
33 | end if;
34 | end if;
35 | dbms_output.put_line(sep||tabc.column_name||' '||datat);
36 | sep := ',';
37 | end loop;
38 | sep := ' ';
39 | dbms_output.put_line(')');
40 | dbms_output.put_line('organization external (type oracle_loader default directory tpcds_load_dir access parameters (');
41 | dbms_output.put_line('RECORDS DELIMITED BY NEWLINE');
42 | dbms_output.put_line('FIELDS TERMINATED BY ''|''');
43 | dbms_output.put_line('(');
44 | for tabc in (select * from user_tab_columns where table_name = tabl.table_name order by column_id)
45 | loop
46 | if (tabc.data_type = 'DATE')
47 | then
48 | dbms_output.put_line(sep||tabc.column_name||' date "YYYY-MM-DD"');
49 | else
50 | dbms_output.put_line(sep||tabc.column_name);
51 | end if;
52 | sep := ',';
53 | end loop;
54 | dbms_output.put_line(')) location (''' || lower(tabl.table_name) ||'.dat'')');
55 | dbms_output.put_line(');');
56 | end loop;
57 | end;
58 | /
59 |
60 | spool off
61 |
--------------------------------------------------------------------------------
/upgrading_to_12c/README.md:
--------------------------------------------------------------------------------
1 | # The Optimizer and Upgrading to Oracle Database 12c
2 |
3 | If you are upgrading to Oracle Database 12c, you need to be aware that the Oracle Optimizer stores certain metadata to support its adaptive features. This can affect the way a DBA manages statistics during testing and upgrade. Certain optimizer statistics are created in response to the optimizer metadata to improve cardinality estimates over time. Specifically, these are *histograms* (in response to column usage information) and *column group statistics* (in response to SQL plan directives).
4 |
5 | You need to be aware of changes to optimizer statistics and metadata so that you can manage the optimizer successfully during an upgrade.
6 |
7 | You may need to:
8 |
9 | * Understand exactly what histograms and extended column group statistics are present in the database. Some may have been created automatically and (for databases that host complex and/or multiple applications) there can be a variety of strategies in use to maintain these entities.
10 | * Copy statistics from one database environment to another. For example, from pre-production to production. Even if these two environments have different data, it may be deemed beneficial to use the same histograms and column group statistics in production as those found to be useful in pre-production.
11 | * Copy histogram definitions from one database to another (or from one schema to another) so that you have a consistent set of statistics.
12 | * Copy extended statistic definitions from one database to another (or from one schema to another) so that you have a consistent set of statistics.
13 | * Reliably copy ALL relevant optimizer statistics from one database (or schema) to another.
14 | * Use a specific and consistent set of histograms for a period of time after an upgrade (i.e. use tailored *method_opt* settings rather than METHOD_OPT=>'FOR ALL COLUMNS SIZE AUTO'; see the sketch below).
15 |
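For the last point above, the general shape is a tailored table-level preference. A minimal sketch (the schema, table and column names are illustrative only):

```sql
-- Disable automatic histogram creation for this table but keep one known,
-- explicit histogram; the preference takes effect at the next stats gather.
exec dbms_stats.set_table_prefs('HR','CUSTOMERS','METHOD_OPT','FOR ALL COLUMNS SIZE 1 FOR COLUMNS CUST_TYPE SIZE 254')
exec dbms_stats.gather_table_stats('HR','CUSTOMERS')
```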
16 | In the directories below are some *experimental* scripts to help you manage statistics. The aim here is for you to take and adapt them to your own needs. They are broken down into three categories:
17 |
18 | ### [show_stats](https://github.com/oracle/dw-vldb/tree/master/upgrading_to_12c/show_stats)
19 |
20 | These scripts demonstrate how to view extended statistics, histograms, SQL plan directives and column usage information. They also demonstrate how you can see which histograms and extended statistics have been created automatically.
21 |
22 | ### [duplicate](https://github.com/oracle/dw-vldb/tree/master/upgrading_to_12c/duplicate)
23 |
24 | These scripts query a database schema to spool scripts that can be used to create matching histograms and extended statistics on another database.
25 |
26 | ### [dpump_copy](https://github.com/oracle/dw-vldb/tree/master/upgrading_to_12c/dpump_copy)
27 |
28 | These scripts demonstrate how easy it is to use Data Pump to copy all relevant statistics from one database schema to another.
29 |
30 | ### Note
31 |
32 | All of the scripts are designed to work with Oracle Database 12c Release 1. Expect further updates when a new Oracle Database release becomes available.
33 |
34 | ### DISCLAIMER
35 |
36 | * These scripts are provided for educational purposes only.
37 | * They are NOT supported by Oracle World Wide Technical Support.
38 | * The scripts have been tested and they appear to work as intended.
39 | * You should always run scripts on a test instance.
40 |
41 | ### WARNING
42 |
43 | * These scripts drop and create user accounts. For use on test databases. DBA access is required.
44 |
--------------------------------------------------------------------------------
/upgrading_to_12c/dpump_copy/README.md:
--------------------------------------------------------------------------------
1 | # Copying Statistics
2 |
3 | When you copy statistics from one database schema to another, you need to consider:
4 |
5 | - Base statistics information for tables and indexes (e.g. NUM_ROWS)
6 | - Extended statistics
7 | - Histograms
8 | - Column usage information
9 | - Individual table DBMS_STATS preferences
10 | - SQL plan directives
11 |
12 | Luckily, it is very easy to use Data Pump to achieve this aim. This directory contains a self-contained example that creates a schema "S1" with a full complement of statistics and a schema "S2" with nothing but index statistics. The Data Pump example copies all statistics and metadata from S1 to S2.
13 |
14 | To run the example:
15 |
16 | ```
17 | $ sqlplus / as sysdba
18 | SQL> @users
19 | SQL> @look -- This lists statistics information and metadata before it has been copied to S2
20 | SQL> exit
21 | $ ./dp_copy -- Using Data Pump to copy all relevant metadata from S1 to S2
22 | $ sqlplus / as sysdba
23 | SQL> @look -- This lists statistics information and metadata after it has been copied to S2
24 | SQL> @gather_s2.sql -- S2 has a slightly different row count to S1, so this is corrected when stats are regathered.
25 | -- The relevant histograms, extended statistics, SQL plan directives and so on are retained.
26 | ```
27 | Example *lst* and *log* files are included so you can see the expected results.
28 |
29 | Notice how all of the statistics and metadata are copied across from S1 to S2.
30 |
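A quick way to confirm the copy is to compare the histograms in S1 and S2 side by side (this is a subset of what look.sql reports):

```sql
select owner, table_name, column_name, histogram
from   dba_tab_col_statistics
where  owner in ('S1','S2')
order  by owner, table_name, column_name;
```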
31 | ### DISCLAIMER
32 |
33 | * These scripts are provided for educational purposes only.
34 | * They are NOT supported by Oracle World Wide Technical Support.
35 | * The scripts have been tested and they appear to work as intended.
36 | * You should always run scripts on a test instance.
37 |
38 | ### WARNING
39 |
40 | * These scripts drop and create user accounts. For use on test databases. DBA access is required.
41 |
--------------------------------------------------------------------------------
/upgrading_to_12c/dpump_copy/dp_copy:
--------------------------------------------------------------------------------
1 | expdp s1/s1 dumpfile=expdat.dmp content=metadata_only include=statistics
2 | if [ $? -eq 0 ]
3 | then
4 | impdp s2/s2 dumpfile=expdat.dmp table_exists_action=skip include=statistics remap_schema=s1:s2
5 | else
6 | echo "Error detected. Import skipped."
7 | fi
8 |
--------------------------------------------------------------------------------
/upgrading_to_12c/dpump_copy/dp_copy.log:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/dw-vldb-samples/7c780e3f76baf6e0a904869658e074fb95bab986/upgrading_to_12c/dpump_copy/dp_copy.log
--------------------------------------------------------------------------------
/upgrading_to_12c/dpump_copy/gather_s2.sql:
--------------------------------------------------------------------------------
1 | exec dbms_stats.gather_table_stats(ownname=>'s2',tabname=>'t1',method_opt=>'for all columns size auto');
2 |
--------------------------------------------------------------------------------
/upgrading_to_12c/dpump_copy/look.sql:
--------------------------------------------------------------------------------
1 | set pagesize 1000
2 | set linesize 200
3 | column table_name format a30
4 | column index_name format a30
5 | column owner format a30
6 | column column_name format a30
7 | column tcb_pref format a10
8 | column cuse format a100 wrapped
9 | column analyzed format a19
10 | column extension format a20
11 | break on owner
12 |
13 | prompt =======================================================================
14 | prompt This script displays the various metadata associated with optimizer
15 | prompt statistics and adaptive features.
16 | prompt Specifically:
17 | prompt Stats information in DBA_TABLES
18 | prompt Stats information in DBA_INDEXES
19 | prompt Extended statistics
20 | prompt Histograms
21 | prompt Column usage information
22 | prompt Table preferences
23 | prompt SQL plan directives
24 | prompt =======================================================================
25 |
26 | prompt Compare analyzed dates between S1 and S2 for tables
27 | prompt S1.T1 and S2.T1 have slightly different row counts. The difference will
28 | prompt be apparent in the statistics until the statistics are copied from S1 to S2.
29 | prompt ============================================================================
30 | select table_name,owner,to_char(last_analyzed,'YYYY-MM-DD HH24:MI:SS') analyzed, num_rows
31 | from dba_tables
32 | where owner in ('S1','S2');
33 |
34 | prompt Compare analyzed dates between S1 and S2 for indexes
35 | prompt ====================================================
36 | select table_name,index_name,owner,to_char(last_analyzed,'YYYY-MM-DD HH24:MI:SS') analyzed
37 | from dba_indexes
38 | where owner in ('S1','S2');
39 |
40 | prompt Extended stats. Compare user S1 with S2...
41 | prompt ==========================================
42 | select owner,table_name,extension
43 | from dba_stat_extensions
44 | where owner in ('S1','S2')
45 | order by owner,table_name;
46 |
47 | prompt Histograms. Compare user S1 with S2...
48 | prompt ======================================
49 | select owner,table_name,column_name,histogram
50 | from dba_tab_col_statistics
51 | where owner in ('S1','S2')
52 | order by owner,table_name,column_name;
53 |
54 | prompt Number of COL_USAGE$ entries for S1
55 | prompt ===================================
56 | select count(*) from sys.col_usage$
57 | where obj# = (select object_id from dba_objects where object_name = 'T1' and owner = 'S1');
58 | prompt Number of COL_USAGE$ entries for S2
59 | prompt ===================================
60 | select count(*) from sys.col_usage$
61 | where obj# = (select object_id from dba_objects where object_name = 'T1' and owner = 'S2');
62 |
63 | set long 1000000
64 | prompt Column usage report for S1.T1
65 | prompt =============================
66 | select dbms_stats.report_col_usage(ownname=>'s1',tabname=>'t1') cuse from dual;
67 | prompt Column usage report for S2.T1
68 | prompt =============================
69 | select dbms_stats.report_col_usage(ownname=>'s2',tabname=>'t1') cuse from dual;
70 |
71 | prompt S1.T1 TABLE_CACHED_BLOCKS preference
72 | prompt ====================================
73 | select dbms_stats.get_prefs ('TABLE_CACHED_BLOCKS','s1','t1') tcb_pref from dual;
74 | prompt S2.T1 TABLE_CACHED_BLOCKS preference (should match S1.T1 once copied)
75 | prompt =====================================================================
76 | select dbms_stats.get_prefs ('TABLE_CACHED_BLOCKS','s2','t1') tcb_pref from dual;
77 |
78 |
79 | PROMPT SQL plan directives
80 | PROMPT ===================
81 | exec dbms_spd.flush_sql_plan_directive;
82 |
83 | COLUMN dir_id FORMAT A20
84 | COLUMN owner FORMAT A10
85 | COLUMN object_name FORMAT A10
86 | COLUMN col_name FORMAT A10
87 |
88 | SELECT o.owner, o.object_name,
89 | o.subobject_name col_name, o.object_type, d.type, d.state, d.reason
90 | FROM dba_sql_plan_directives d, dba_sql_plan_dir_objects o
91 | WHERE d.directive_id=o.directive_id
92 | AND o.owner in ('S1','S2')
93 | ORDER BY 1,2,3,4,5;
94 |
95 |
--------------------------------------------------------------------------------
/upgrading_to_12c/dpump_copy/users.sql:
--------------------------------------------------------------------------------
1 | connect / as sysdba
2 |
3 | drop user s1 cascade;
4 | drop user s2 cascade;
5 |
6 | create user s1 identified by s1;
7 | grant dba to s1;
8 |
9 | create user s2 identified by s2;
10 | grant dba to s2;
11 |
12 | connect s1/s1
13 | create table t1 (id number(10),c1 varchar2(15),c2 varchar2(10),c3 varchar2(10));
14 | insert into t1 values (1,'X','X','A');
15 | insert into t1 values (2,'Y','Y','B');
16 | insert into t1 values (3,'Z','Z','C');
17 | begin
18 | for i in 1..10000
19 | loop
20 | insert into t1 values (4,'W'||i,'W'||i,'D');
21 | end loop;
22 | end;
23 | /
24 | commit;
25 | create index t1i on t1 (c1);
26 | --
27 | -- This query will generate column usage information for S1.T1
28 | -- so we should get a histogram when we gather stats with
29 | -- FOR ALL COLUMNS SIZE AUTO
30 | --
31 | select count(*) from t1 where c1 between 'W1' and 'W5';
32 | --
33 | -- Create extended stats
34 | --
35 | select dbms_stats.create_extended_stats(user,'t1','(c1,c2)') from dual;
36 | --
37 | -- Gather stats on S1.T1
38 | --
39 | exec dbms_stats.gather_table_stats(ownname=>null,tabname=>'t1',method_opt=>'for all columns size auto');
40 | --
41 | -- Set a table preference to check that it is copied from one schema to another
42 | --
43 | select dbms_stats.get_prefs ('TABLE_CACHED_BLOCKS','s1','t1') from dual;
44 | exec dbms_stats.set_table_prefs ('s1','t1','TABLE_CACHED_BLOCKS',50)
45 | select dbms_stats.get_prefs ('TABLE_CACHED_BLOCKS','s1','t1') from dual;
46 |
47 | connect s2/s2
48 | --
49 | -- Create S2.T1 with a slightly different number of rows
50 | --
51 | create table t1 (id number(10),c1 varchar2(15),c2 varchar2(10),c3 varchar2(10));
52 | insert into t1 values (1,'X','X','A');
53 | insert into t1 values (2,'Y','Y','B');
54 | insert into t1 values (3,'Z','Z','C');
55 | begin
56 | for i in 1..10100
57 | loop
58 | insert into t1 values (4,'W'||i,'W'||i,'D');
59 | end loop;
60 | end;
61 | /
62 | commit;
63 | --
64 | -- We'll get stats on the index, but we won't be gathering stats
65 | -- on the S2.T1 table because we want to copy everything from S1.T1
66 | --
67 | create index t1i on t1 (c1);
68 |
69 | --
70 | -- Let's create some SQL plan directives
71 | --
72 | connect s1/s1
73 |
74 | CREATE TABLE spdtab (
75 | id NUMBER,
76 | col1 VARCHAR2(1),
77 | col2 VARCHAR2(1)
78 | );
79 |
80 | INSERT INTO spdtab
81 | SELECT level, 'A', 'B'
82 | FROM dual
83 | CONNECT BY level <= 10;
84 | COMMIT;
85 |
86 | INSERT INTO spdtab
87 | SELECT 10+level, 'C', 'D'
88 | FROM dual
89 | CONNECT BY level <= 90;
90 | COMMIT;
91 |
92 | EXEC DBMS_STATS.gather_table_stats(USER, 'SPDTAB');
93 |
94 | SELECT *
95 | FROM spdtab
96 | WHERE col1 = 'A'
97 | AND col2 = 'B';
98 |
99 | connect s2/s2
100 |
101 | CREATE TABLE spdtab (
102 | id NUMBER,
103 | col1 VARCHAR2(1),
104 | col2 VARCHAR2(1)
105 | );
106 |
107 |
--------------------------------------------------------------------------------
/upgrading_to_12c/duplicate/README.md:
--------------------------------------------------------------------------------
1 | # Copying Histogram and Extended Stats Definitions
2 |
3 | If you have a pre-upgrade database or a test database and you want to duplicate extended stats and histogram definitions in another database or schema, these scripts show you how.
4 |
5 | ### copy_hist.sql
6 |
7 | This script queries a schema to discover what histograms are present. It generates a set of DBMS_STATS.SET_TABLE_PREFS commands to match these histograms so you can explicitly define an equivalent set of histograms on another database. Like this:
8 |
9 | ```sql
10 | exec dbms_stats.set_table_prefs('HR','REGIONS','METHOD_OPT','FOR ALL COLUMNS SIZE 1, FOR COLUMNS REGION_NAME SIZE 254')
11 | exec dbms_stats.set_table_prefs('HR','CUSTOMERS','METHOD_OPT','FOR ALL COLUMNS SIZE 1, FOR COLUMNS CUST_TYPE SIZE 254')
12 | ```
13 |
14 | Once the histograms are captured, the generated script can be executed on the relevant database to ensure that a consistent set of histograms is created until you choose to implement "FOR ALL COLUMNS SIZE AUTO".
15 |
16 | Log into a DBA account in SQL*Plus and run the script. You will be asked to choose a schema to "capture". A file (gen_copy_hist.sql) will be spooled containing commands to create the same set of histograms on another database.
17 |
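The generated preferences only take effect the next time statistics are gathered, so applying the file on the target database might look like this (a sketch, reusing the HR schema from the example above):

```sql
@gen_copy_hist.sql
exec dbms_stats.gather_schema_stats('HR')
```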
18 | ### copy_hist_a.sql
19 |
20 | This script queries a schema to discover what histograms are present. It generates a set of DBMS_STATS.SET_TABLE_PREFS commands to match these histograms so you can explicitly define a set of histograms on another database, but also leaves scope for the Oracle Database to discover and create new histograms where needed. Like this:
21 |
22 | ```sql
23 | exec dbms_stats.set_table_prefs('HR','REGIONS','METHOD_OPT','FOR ALL COLUMNS SIZE AUTO, FOR COLUMNS REGION_NAME SIZE 254')
24 | exec dbms_stats.set_table_prefs('HR','CUSTOMERS','METHOD_OPT','FOR ALL COLUMNS SIZE AUTO, FOR COLUMNS CUST_TYPE SIZE 254')
25 | ```
26 |
27 | Once the histograms are captured, the generated script can be executed on the relevant database to ensure that a consistent set of histograms is created.
28 |
29 | Log into a DBA account in SQL*Plus and run the script. You will be asked to choose a schema to "capture". A file (gen_copy_hist_a.sql) will be spooled containing commands to create the same set of histograms on another database.
30 |
31 | ### copy_ext.sql
32 |
33 | This script is similar to the one above except that it creates a matching set of extended statistics. It spools a file called gen_copy_ext.sql.
34 |
35 | ### DISCLAIMER
36 |
37 | * These scripts are provided for educational purposes only.
38 | * They are NOT supported by Oracle World Wide Technical Support.
39 | * The scripts have been tested and they appear to work as intended.
40 | * You should always run scripts on a test instance.
41 |
42 | ### WARNING
43 |
44 | * These scripts drop and create user accounts. For use on test databases. DBA access is required.
45 |
--------------------------------------------------------------------------------
/upgrading_to_12c/duplicate/copy_ext.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT Generate script to create extended statistics based on an example schema
3 | PROMPT ======================================================================================
4 | set serveroutput on
5 | set verify off
6 | set feedback off
7 | set linesize 1000
8 | set trims on
9 | set serveroutput on
10 |
11 | accept schema prompt 'Enter the name of the schema to copy: '
12 |
13 | spool gen_copy_ext.sql
14 | declare
15 | cursor extlist is
16 | select table_name,extension,creator,owner
17 | from dba_stat_extensions
18 | where owner = upper('&schema')
19 | order by creator,table_name,extension_name;
20 | begin
21 | dbms_output.put_line('var r VARCHAR2(50)');
22 | for c in extlist
23 | loop
24 | dbms_output.put('exec :r := dbms_stats.create_extended_stats('''||c.owner||''','''||c.table_name||''','''||c.extension||''')');
25 | dbms_output.put_line(' /* '||c.creator||' */');
26 | end loop;
27 | end;
28 | /
29 | spool off
30 |
--------------------------------------------------------------------------------
/upgrading_to_12c/duplicate/copy_hist.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT Generate a script to create table histogram preferences based on example schema
3 | PROMPT ======================================================================================
4 | whenever sqlerror exit
5 |
6 | set serveroutput on
7 | set verify off
8 | set feedback off
9 | set linesize 1000
10 | set trims on
11 |
12 | accept schema prompt 'Enter the name of the schema to copy: '
13 |
14 | PROMPT Table histogram script...
15 |
16 | spool gen_copy_hist.sql
17 | declare
18 | n number(10) := 0;
19 | tname varchar2(100);
20 | cursor tlist is
21 | select distinct cs.owner,cs.table_name
22 | from dba_tab_col_statistics cs,
23 | dba_tables t
24 | where cs.histogram is not null
25 | and cs.histogram != 'NONE'
26 | and cs.owner = upper('&schema')
27 | and t.owner = upper('&schema')
28 | and t.table_name = cs.table_name
29 | order by cs.owner,cs.table_name;
30 | cursor collist is
31 | select column_name
32 | from dba_tab_col_statistics
33 | where histogram is not null
34 | and histogram != 'NONE'
35 | and owner = upper('&schema')
36 | and table_name = tname
37 | order by owner,table_name,column_name;
38 | begin
39 | dbms_output.put_line('PROMPT NOTE! It is assumed that histograms are disabled for tables not included in this script.');
40 | dbms_output.put_line('PROMPT For example:');
41 | dbms_output.put_line('PROMPT EXEC DBMS_STATS.SET_GLOBAL_PREFS(''METHOD_OPT'',''FOR ALL COLUMNS SIZE 1'')');
42 | dbms_output.put_line('PROMPT Alternatively:');
43 | dbms_output.put_line('PROMPT EXEC DBMS_STATS.SET_SCHEMA_PREFS(''&schema'',''METHOD_OPT'',''FOR ALL COLUMNS SIZE 1'')');
44 | for t in tlist
45 | loop
46 | dbms_output.put('exec dbms_stats.set_table_prefs('''||t.owner||''','''||t.table_name||''',''METHOD_OPT'',');
47 | dbms_output.put('''FOR ALL COLUMNS SIZE 1 FOR COLUMNS SIZE 254 ');
48 | tname := t.table_name;
49 | for c in collist
50 | loop
51 | dbms_output.put(c.column_name||' ');
52 | end loop;
53 | dbms_output.put(''')');
54 | dbms_output.put_line('');
55 | end loop;
56 | end;
57 | /
58 | spool off
59 |
--------------------------------------------------------------------------------
/upgrading_to_12c/duplicate/copy_hist_a.sql:
--------------------------------------------------------------------------------
1 | PROMPT =======================================================================================
2 | PROMPT Generate a script to create table histogram preferences based on example schema.
3 | PROMPT AUTO histogram creation is enabled for columns that don't already have a histogram.
4 | PROMPT This enables new histograms to be created if skews are found.
5 | PROMPT =======================================================================================
6 | whenever sqlerror exit
7 |
8 | set serveroutput on
9 | set verify off
10 | set feedback off
11 | set linesize 1000
12 | set trims on
13 |
14 | accept schema prompt 'Enter the name of the schema to copy: '
15 |
16 | PROMPT Table histogram script...
17 |
18 | spool gen_copy_hist_a.sql
19 | declare
20 | n number(10) := 0;
21 | tname varchar2(100);
22 | cursor tlist is
23 | select distinct cs.owner,cs.table_name
24 | from dba_tab_col_statistics cs,
25 | dba_tables t
26 | where cs.histogram is not null
27 | and cs.histogram != 'NONE'
28 | and cs.owner = upper('&schema')
29 | and t.owner = upper('&schema')
30 | and t.table_name = cs.table_name
31 | order by cs.owner,cs.table_name;
32 | cursor collist is
33 | select column_name
34 | from dba_tab_col_statistics
35 | where histogram is not null
36 | and histogram != 'NONE'
37 | and owner = upper('&schema')
38 | and table_name = tname
39 | order by owner,table_name,column_name;
40 | begin
41 | dbms_output.put_line('PROMPT NOTE! It is assumed that global or schema METHOD_OPT is its default value.');
42 | dbms_output.put_line('PROMPT For example:');
43 | dbms_output.put_line('PROMPT EXEC DBMS_STATS.SET_GLOBAL_PREFS(''METHOD_OPT'',''FOR ALL COLUMNS SIZE AUTO'')');
44 | dbms_output.put_line('PROMPT Alternatively:');
45 | dbms_output.put_line('PROMPT EXEC DBMS_STATS.SET_SCHEMA_PREFS(''&schema'',''METHOD_OPT'',''FOR ALL COLUMNS SIZE AUTO'')');
46 | for t in tlist
47 | loop
48 | dbms_output.put('exec dbms_stats.set_table_prefs('''||t.owner||''','''||t.table_name||''',''METHOD_OPT'',');
49 | dbms_output.put('''FOR ALL COLUMNS SIZE AUTO FOR COLUMNS SIZE 254 ');
50 | tname := t.table_name;
51 | for c in collist
52 | loop
53 | dbms_output.put(c.column_name||' ');
54 | end loop;
55 | dbms_output.put(''')');
56 | dbms_output.put_line('');
57 | end loop;
58 | end;
59 | /
60 | spool off
61 |
--------------------------------------------------------------------------------
/upgrading_to_12c/show_stats/README.md:
--------------------------------------------------------------------------------
1 | # Viewing Statistics
2 |
3 | If you have a pre-upgrade database or a test database, you will probably want to understand which optimizer statistics and metadata are present before you upgrade.
4 |
5 | These scripts allow you to view various optimizer statistics and metadata:
6 |
7 | ### show_ext.sql
8 |
9 | Lists extended statistics created manually and those created by the system in response to SQL plan directives.
10 |
11 | ### show_spd.sql
12 |
13 | Lists SQL plan directives relevant to a specific schema.
14 |
15 | ### show_usage.sql
16 |
17 | Lists column usage information. This metadata is generated automatically by the optimizer and it is used by DBMS_STATS.GATHER... to figure out where histograms might be needed (and *not* needed), so it is important that the data here is representative of your workload. You can use this script to get an idea of what data is present for a specific schema.
18 |
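For a single table you can also ask DBMS_STATS for a column usage report directly. A minimal sketch (the schema and table names are illustrative):

```sql
set long 1000000
select dbms_stats.report_col_usage(ownname=>'HR', tabname=>'EMPLOYEES') from dual;
```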
19 | ### show_hist.sql
20 |
21 | Lists histograms present for a specific schema. It also looks at historical gather stats information to help you understand what initiated the creation of the histogram. For example, if METHOD_OPT was "FOR ALL COLUMNS SIZE AUTO" then it means that the histogram was created automatically in response to skew and column usage information. A simplified 11g version of the script is provided (show_hist_11g.sql) without METHOD_OPT information because this information is not available in that release.
22 |
23 | ### DISCLAIMER
24 |
25 | * These scripts are provided for educational purposes only.
26 | * They are NOT supported by Oracle World Wide Technical Support.
27 | * The scripts have been tested and they appear to work as intended.
28 | * You should always run scripts on a test instance.
29 |
30 | ### WARNING
31 |
32 | * These scripts require DBA access (they query DBA_ views and SYS.COL_USAGE$). For use on test databases only.
33 |
--------------------------------------------------------------------------------
/upgrading_to_12c/show_stats/show_ext.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT Show system-created and user-created extended statistics for a schema
3 | PROMPT ======================================================================================
4 | set trims on
5 | set feedback off
6 | set linesize 200
7 | set pagesize 1000
8 | set long 10000
9 | set verify off
10 | column table_name format a40
11 | column extension format a70
12 | column extension_name format a50
13 | set serveroutput on
14 |
15 | accept schema prompt 'Enter the name of the schema to check: '
16 |
17 | PROMPT
18 | PROMPT System-created extensions...
19 | PROMPT
20 | select table_name,extension,extension_name
21 | from dba_stat_extensions
22 | where owner = upper('&schema')
23 | and creator = 'SYSTEM'
24 | order by table_name,extension_name;
25 |
26 | PROMPT
27 | PROMPT User-created extensions...
28 | PROMPT
29 | select table_name,extension,extension_name
30 | from dba_stat_extensions
31 | where owner = upper('&schema')
32 | and creator = 'USER'
33 | order by table_name,extension_name;
34 |
--------------------------------------------------------------------------------
/upgrading_to_12c/show_stats/show_hist.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT Show histograms and the METHOD_OPT used to create them
3 | PROMPT This script works on Oracle Database 12c only.
4 | PROMPT If the METHOD_OPT shown for a histogram is "FOR ALL COLUMNS SIZE AUTO"
5 | PROMPT then this implies that the histogram was created automatically
6 | PROMPT based on column usage.
7 | PROMPT ======================================================================================
8 | set trims on
9 | set feedback off
10 | set linesize 200
11 | set pagesize 1000
12 | set long 10000
13 | set verify off
14 | column table_name format a40
15 | column column_name format a40
16 |
17 | accept schema prompt 'Enter the name of the schema to check: '
18 |
19 | set serveroutput on
20 |
21 | PROMPT
22 | PROMPT Histograms and METHOD_OPT parameter...
23 | PROMPT
24 | declare
25 | n number(10) := 0;
26 | ptarget varchar2(1000);
27 | cursor histograms is
28 | select owner,table_name,column_id,column_name,histogram
29 | from dba_tab_columns
30 | where owner = upper('&schema')
31 | and histogram != 'NONE'
32 | and histogram is not null
33 | order by 1,2,3;
34 | cursor operation is
35 | select to_char(end_time,'yyyy-mm-dd hh24:mi') as end_time
36 | ,doot.target
37 | ,extract(xmltype(notes),'//param[@name="method_opt"]/@val').getStringVal() as operation
38 | from dba_optstat_operations,
39 | ( select opid,
40 | target
41 | from dba_optstat_operation_tasks
42 | where target = ptarget
43 | and status = 'COMPLETED'
44 | and end_time is not null
45 | order by end_time desc fetch first row only) doot
46 | where id = doot.opid
47 | and status = 'COMPLETED'
48 | and end_time is not null;
49 | -- The operation cursor extracts METHOD_OPT from the XML notes of the most recent completed gather for ptarget.
50 | begin
51 | for h in histograms
52 | loop
53 | n:=n+1;
54 | if (n=1)
55 | then
56 | dbms_output.put_line(rpad('Table',40)||' '||rpad('Column',40)||' '||rpad('Histogram',15)||' '||rpad('Time',19)||rpad('METHOD_OPT',30));
57 | dbms_output.put_line(rpad('-',150,'-'));
58 | end if;
59 | ptarget := h.owner || '.' || h.table_name;
60 | dbms_output.put(rpad(ptarget,40)||' '||rpad(h.column_name,40)||' '||rpad(h.histogram,15)||' ');
61 | for o in operation
62 | loop
63 | dbms_output.put(o.end_time||' '||'('||o.operation||')');
64 | end loop;
65 | dbms_output.put_line(''); -- terminate the row even when no gather operation is recorded
66 | end loop;
67 | if (n=0)
68 | then
69 | dbms_output.put_line('None found');
70 | end if;
71 | end;
72 | /
73 |
--------------------------------------------------------------------------------
/upgrading_to_12c/show_stats/show_hist_11g.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT Show histograms
3 | PROMPT This script works on Oracle Database 11g
4 | PROMPT ======================================================================================
5 | set trims on
6 | set feedback off
7 | set linesize 200
8 | set pagesize 1000
9 | set long 10000
10 | set verify off
11 | column table_name format a40
12 | column column_name format a40
13 |
14 | accept schema prompt 'Enter the name of the schema to check: '
15 |
16 | set serveroutput on
17 |
18 | PROMPT
19 | PROMPT Histograms...
20 | PROMPT
21 | declare
22 | n number(10) := 0;
23 | ptarget varchar2(1000);
24 | cursor histograms is
25 | select owner,table_name,column_id,column_name,histogram
26 | from dba_tab_columns
27 | where owner = upper('&schema')
28 | and histogram != 'NONE'
29 | and histogram is not null
30 | order by 1,2,3;
31 | begin
32 | for h in histograms
33 | loop
34 | n:=n+1;
35 | if (n=1)
36 | then
37 | dbms_output.put_line(rpad('Table',40)||' '||rpad('Column',40)||' '||rpad('Histogram',15));
38 | dbms_output.put_line(rpad('-',105,'-'));
39 | end if;
40 | ptarget := h.owner || '.' || h.table_name;
41 | dbms_output.put(rpad(ptarget,40)||' '||rpad(h.column_name,40)||' '||rpad(h.histogram,15)||' ');
42 | dbms_output.put_line(' ');
43 | end loop;
44 | if (n=0)
45 | then
46 | dbms_output.put_line('None found');
47 | end if;
48 | end;
49 | /
50 |
--------------------------------------------------------------------------------
/upgrading_to_12c/show_stats/show_spd.sql:
--------------------------------------------------------------------------------
1 | PROMPT ======================================================================================
2 | PROMPT Show SQL plan directives associated with a chosen schema
3 | PROMPT ======================================================================================
4 | set trims on
5 | set feedback off
6 | set linesize 200
7 | set pagesize 1000
8 | set long 10000
9 | set verify off
10 | column table_name format a40
11 | column column_name format a40
12 |
13 | accept own prompt 'Enter the name of the schema to check: '
14 |
15 | --
16 | -- Ensure directives are flushed
17 | --
18 | exec dbms_spd.flush_sql_plan_directive;
19 |
20 | COLUMN dir_id FORMAT A20
21 | COLUMN owner FORMAT A10
22 | COLUMN object_name FORMAT A10
23 | COLUMN col_name FORMAT A10
24 |
25 | SELECT to_char(d.directive_id) dir_id,
26 | o.object_type,
27 | o.object_name,
28 | o.subobject_name col_name,
29 | d.type,
30 | d.state,
31 | d.reason
32 | FROM dba_sql_plan_directives d, dba_sql_plan_dir_objects o
33 | WHERE d.directive_id=o.directive_id
34 | AND o.owner = upper('&own')
35 | ORDER BY 2,3,4;
36 |
37 |
--------------------------------------------------------------------------------
/upgrading_to_12c/show_stats/show_usage.sql:
--------------------------------------------------------------------------------
1 | set linesize 200
2 | set pagesize 100
3 | set trims on
4 | set tab off
5 | set verify off
6 | column column_name format a40
7 | column table_name format a40
8 | accept own prompt 'Enter the name of the schema to check: '
9 |
10 | prompt COL_USAGE$ entries for chosen schema
11 | prompt ====================================
12 | select c.TABLE_NAME,
13 | c.COLUMN_NAME,
14 | u.EQUALITY_PREDS,
15 | u.EQUIJOIN_PREDS,
16 | u.NONEQUIJOIN_PREDS,
17 | u.RANGE_PREDS,
18 | u.LIKE_PREDS,
19 | u.NULL_PREDS,
20 | u.TIMESTAMP
21 | from sys.col_usage$ u,
22 | dba_tab_columns c,
23 | dba_objects o
24 | where u.obj# = o.object_id
25 | and c.owner = upper('&own')
26 | and u.intcol# = c.column_id
27 | and o.owner = upper('&own')
28 | and o.object_name = c.table_name
29 | order by c.table_name, c.column_name;
--------------------------------------------------------------------------------