This post is related to my previous entry about extracting useful information to aid in diagnosing query performance.
This routine can be passed a statement id (for the plan_table) or a hash_value (for v$sql_plan). It then looks at the base tables and indexes referenced by the plan and extracts statistics. Checking some of these details, such as last analyzed date or the number of rows, may indicate why the optimizer has chosen a particular path.
Like my previous entry on using DBMS_XPLAN with V$SQL_PLAN, it uses invoker rights and dynamic SQL so that an explicit grant is not required for the user on V$SQL_PLAN.
-- optimizer_data
-- Returns a collection of formatted text lines (sys.DBMS_DEBUG_VC2COLL)
-- describing the tables, indexes and column statistics referenced by an
-- execution plan, to help diagnose why the optimizer chose a given path.
--
-- Parameters:
--   p_statement_id : STATEMENT_ID in the plan table (used when p_hash_value is null)
--   p_hash_value   : HASH_VALUE of a cursor in V$SQL_PLAN (takes precedence)
--   p_tab_name     : name of the plan table to query (default 'PLAN_TABLE')
-- If both identifiers are null, the most recently timestamped plan in
-- p_tab_name is reported.
--
-- AUTHID CURRENT_USER plus dynamic SQL means no explicit grant on
-- V$SQL_PLAN is required for the function to compile.
create or replace function optimizer_data
(p_statement_id in varchar2 default null,
p_hash_value in number default null,
p_tab_name in varchar2 default 'PLAN_TABLE')
return sys.DBMS_DEBUG_VC2COLL
authid current_user is
--
cur_plan_det SYS_REFCURSOR;
-- One record per distinct object/predicate combination found in the plan.
TYPE rec_plan_det is RECORD
(obj_owner varchar2(50),
obj_name varchar2(50),
access_pred varchar2(4000),
filter_pred varchar2(4000),
operation varchar2(50));
TYPE tab_plan_det is TABLE of rec_plan_det index by pls_integer;
r_last rec_plan_det; -- previous iteration's record, used to suppress repeated object headers
r_curr rec_plan_det; -- current iteration's record
t_plan_det tab_plan_det;
v_ind number;
v_anal_date date;
--
-- Output collection and the index of the next free slot in it.
v_return sys.DBMS_DEBUG_VC2COLL := sys.DBMS_DEBUG_VC2COLL();
v_ret_ind number := 1;
--
-- hexstr: decode a RAWTOHEX'd value (pairs of hex digits) back into
-- characters; used to display LOW_VALUE/HIGH_VALUE for CHAR/VARCHAR2 columns.
FUNCTION hexstr( p_in IN VARCHAR2 ) RETURN VARCHAR2 IS
v_str VARCHAR2(4000);
v_return VARCHAR2(4000);
BEGIN
v_str := p_in;
WHILE ( v_str IS NOT NULL ) LOOP
-- Convert the leading two hex digits to one character, then drop them.
v_return := v_return || CHR(TO_NUMBER(SUBSTR(v_str,1,2),'xx'));
v_str := SUBSTR( v_str, 3 );
END LOOP;
RETURN v_return;
END;
--
begin
-- Choose the plan source: V$SQL_PLAN by hash value, the plan table by
-- statement id, or (fallback) the most recent plan in the plan table.
if p_hash_value is not null then
open cur_plan_det for
'select distinct object_owner, object_name, access_predicates, filter_predicates, operation '||
'from v$sql_plan '||
'where operation in (''TABLE ACCESS'',''INDEX'') '||
'and hash_value = :b1 '||
'order by 1,2 ' using p_hash_value;
elsif p_statement_id is not null then
open cur_plan_det for
'select distinct object_owner, object_name, access_predicates, filter_predicates, operation '||
'from '||p_tab_name||' '||
'where operation in (''TABLE ACCESS'',''INDEX'') '||
'and statement_id = :b1 '||
'order by 1,2 ' using p_statement_id;
else
open cur_plan_det for
'select distinct object_owner, object_name, access_predicates, filter_predicates, operation '||
'from '||p_tab_name||' '||
'where operation in (''TABLE ACCESS'',''INDEX'') '||
'and timestamp = (select max(timestamp) from '||p_tab_name||')'||
'order by 1,2 ';
end if;
fetch cur_plan_det bulk collect into t_plan_det;
close cur_plan_det;
--
v_ind := t_plan_det.first;
while v_ind is not null loop
r_curr := t_plan_det(v_ind);
-- Emit the object-level statistics only when the object changes
-- (rows are ordered by owner, name).
if r_curr.obj_owner||'.'||r_curr.obj_name != r_last.obj_owner||'.'||r_last.obj_name then
if r_curr.operation = 'INDEX' then
-- Three output slots: blank separator, index header, index stats.
-- NOTE(review): raises NO_DATA_FOUND if the index is not visible in
-- ALL_INDEXES to the invoking user.
v_return.extend(3);
v_return(v_ret_ind) := ' ';
v_ret_ind := v_ret_ind + 1;
select 'INDEX:'||r_curr.obj_owner||'.'||r_curr.obj_name||' ('||index_type||')',
' Lblk:'||to_char(leaf_blocks,'999,999,990')||
' Keys:'||to_char(distinct_keys,'999,999,999,990')||
' Rows:'||to_char(num_rows,'999,999,999,990')||
' Cstr:'||to_char(Clustering_Factor,'999,990.00')||
' Anal:'||to_char(last_analyzed,'DD-MON-YY'), last_analyzed
into v_return(v_ret_ind), v_return(v_ret_ind+1), v_anal_date
from all_indexes
where owner=r_curr.obj_owner
and index_name=r_curr.obj_name;
v_ret_ind := v_ret_ind + 2;
else
-- Three output slots: blank separator, table header, table stats.
v_return.extend(3);
v_return(v_ret_ind) := ' ';
v_return(v_ret_ind+1) := 'TABLE:'||rpad(r_curr.obj_owner||'.'||r_curr.obj_name,64);
v_ret_ind := v_ret_ind + 2;
select ' Blks:'||to_char(blocks,'999,999,990')||
' Rows:'||to_char(num_rows,'999,999,999,990')||
' Smpl:'||to_char(sample_size,'999,999,999,990')||
' Len:'||to_char(avg_row_len,'999,999,990')||
' Anal:'||to_char(last_analyzed,'DD-MON-YY'), last_analyzed
into v_return(v_ret_ind), v_anal_date
from all_tables
where owner = r_curr.obj_owner
and table_name = r_curr.obj_name;
v_ret_ind := v_ret_ind + 1;
end if;
end if;
if r_curr.operation = 'INDEX' then
-- For an index: list the statistics of every indexed table column,
-- in key order.
for c_tcol in
(select t.column_name, t.data_type, t.num_distinct, trunc(t.density,4) density,
t.num_nulls, t.num_buckets, t.last_analyzed, t.avg_col_len,
rawtohex(t.low_value) low, rawtohex(t.high_value) high, i.column_position
from all_tab_columns t, all_ind_columns i
where t.owner = i.table_owner
and t.table_name = i.table_name
and t.column_name = i.column_name
and i.index_owner = r_curr.obj_owner
and i.index_name = r_curr.obj_name
order by column_position)
loop
v_return.extend(2);
-- Render LOW_VALUE/HIGH_VALUE for the common types; other types
-- leave this slot null until the prefix is prepended below.
if c_tcol.data_type in ('CHAR','VARCHAR2') then
v_return(v_ret_ind) := ' "'||hexstr(c_tcol.low)||'" to "'||hexstr(c_tcol.high)||'"';
elsif c_tcol.data_type in ('NUMBER') then
-- NOTE(review): LOW_VALUE/HIGH_VALUE hold Oracle's internal NUMBER
-- representation; TO_NUMBER of the raw hex may not reproduce the
-- true low/high value -- confirm before relying on these figures.
v_return(v_ret_ind) := ' "'|| to_number(c_tcol.low, 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')||'" to "'||
to_number(c_tcol.high,'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')||'"';
end if;
v_return(v_ret_ind) := ' '||to_char(c_tcol.column_position,'990')||' '||
rpad(c_tcol.column_name,31)||rpad(c_tcol.data_type,31)||v_return(v_ret_ind);
v_return(v_ret_ind+1) := ' Dist:'||to_char(c_tcol.num_distinct,'999,999,990')||
' Null:'||to_char(c_tcol.num_nulls,'999,999,990')||' '||
' Dens:'||to_char(c_tcol.density,'0.00000')||
' Hist:'||to_char(c_tcol.num_buckets,'99,990')||
' Len :'||to_char(c_tcol.avg_col_len,'9990')||
' Anal:'||to_char(c_tcol.last_analyzed,'DD-MON-YY');
v_ret_ind := v_ret_ind + 2;
end loop;
-- For Function based indexes
for c_icol in
(select c.column_position, e.column_expression
from all_ind_columns c, all_ind_expressions e
where e.index_owner = c.index_owner
and e.index_name = c.index_name
and e.column_position = c.column_position
and c.index_owner = r_curr.obj_owner
and c.index_name = r_curr.obj_name
order by 1)
loop
v_return.extend;
v_return(v_ret_ind) := ' '||c_icol.column_position||' '||c_icol.column_expression;
v_ret_ind := v_ret_ind + 1;
end loop;
else
-- For a table: list only the columns that appear (quoted) in the
-- plan's access or filter predicates.
for c_tcol in
(select column_name, data_type, num_distinct, trunc(density,4) density, num_nulls, num_buckets,
last_analyzed, avg_col_len, rawtohex(low_value) low, rawtohex(high_value) high
from all_tab_columns
where owner = r_curr.obj_owner
and table_name = r_curr.obj_name
and (instr(nvl(r_curr.access_pred,'.'),'"'||column_name||'"') > 0
or instr(nvl(r_curr.filter_pred,'.'),'"'||column_name||'"') > 0))
loop
v_return.extend(2);
if c_tcol.data_type in ('CHAR','VARCHAR2') then
v_return(v_ret_ind) := ' "'||hexstr(c_tcol.low)||'" to "'||hexstr(c_tcol.high)||'"';
elsif c_tcol.data_type in ('NUMBER') then
-- NOTE(review): same caveat as above on decoding internal NUMBER format.
v_return(v_ret_ind) := ' "'|| to_number(c_tcol.low, 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')||'" to "'||
to_number(c_tcol.high,'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')||'"';
end if;
v_return(v_ret_ind) := ' '||rpad(c_tcol.column_name,31)||rpad(c_tcol.data_type,31)||v_return(v_ret_ind);
v_return(v_ret_ind+1) := ' Dist:'||to_char(c_tcol.num_distinct,'999,999,990')||
' Null:'||to_char(c_tcol.num_nulls,'999,999,990')||' '||
' Dens:'||to_char(c_tcol.density,'0.00000')||
' Hist:'||to_char(c_tcol.num_buckets,'99,990')||
' Len :'||to_char(c_tcol.avg_col_len,'9990')||
' Anal:'||to_char(c_tcol.last_analyzed,'DD-MON-YY');
v_ret_ind := v_ret_ind + 2;
end loop;
end if;
-- Append the raw predicates themselves, when present.
if r_curr.access_pred is not null then
v_return.extend;
v_return(v_ret_ind) := 'Access:'||r_curr.access_pred;
v_ret_ind := v_ret_ind + 1;
end if;
if r_curr.filter_pred is not null then
v_return.extend;
v_return(v_ret_ind) := 'Filter:'||r_curr.filter_pred;
v_ret_ind := v_ret_ind + 1;
end if;
r_last := r_curr;
v_ind := t_plan_det.next(v_ind);
end loop;
return v_return;
end;
.
/
show errors
-- Large DBMS_OUTPUT buffer for the demo run below.
set serveroutput on size 100000
-- Example call: with no arguments the function reports on the most
-- recently timestamped plan in the default PLAN_TABLE.
select column_value from table(optimizer_data);
begin
dbms_output.put_line('Done');
end;
.
/
I am a proud Oracle developer and this is my blog.
My website is
here
This blog is OPINION, SUGGESTION and DEBATE.
Please correct anything I write that is misleading.
Monday, August 29, 2005
Thursday, August 25, 2005
10053 - The Optimizer dump
Many seasoned oracle developers will be familiar with the 10046 event used to set trace levels that include WAITs and/or BINDs in the tracefile output.
There is another event, 10053, which can be set which produces a dump from the Query Optimizer. While it isn't a particularly useful tool in day-to-day operations, it can be instructive to look at a dump to see the optimizer's point of view.
A dump can be obtained with :
ALTER SESSION SET EVENTS '10053 trace name context forever, level 1'
Then run the query and fetch the file from the same user_dump_dest you find your trace files.
The query and optimizer parameters
Having a look at the dump and ignoring the header info, we first see the query that is being optimized.
Next there is a list of parameters, documented and undocumented, which may affect which plan is chosen. I won't mention the undocumented ones (mostly prefixed with an underscore), simply because there isn't an authoritative source as to the effect of changing them. The documented ones can all be found in the Oracle Reference manual. For 9iR2, this can be found here
I will put in some brief comments specific to how these affect the optimizer.
The OPTIMIZER_MODE/GOAL determines the aim of the query; that is whether it should try to deliver the first row(s) of the result set as quickly as possible, or all rows. In some cases, such as a CREATE TABLE AS SELECT.... etc. it is obvious that the entire result set will be needed and so ALL_ROWS is most appropriate.
These six will determine what the options the optimizer can or cannot consider for potential plans.
HASH_JOIN_ENABLED
OPTIMIZER_FEATURES_ENABLE
STAR_TRANSFORMATION_ENABLED
PARTITION_VIEW_ENABLED (obsolete)
QUERY_REWRITE_ENABLED
QUERY_REWRITE_INTEGRITY
Changing the following three can affect both which plan is chosen and the resources necessary to execute that plan. Changing the resources available can directly affect the end response time even if the plan does not change.
DB_FILE_MULTIBLOCK_READ_COUNT
HASH_AREA_SIZE/SORT_AREA_SIZE (see also PGA_AGGREGATE_TARGET and WORKAREA_SIZE_POLICY)
The parameters OPTIMIZER_INDEX_CACHING and OPTIMIZER_INDEX_COST_ADJ will affect how the optimizer balances the speed of index access against table access. While changing them can make the query switch to a different plan, they do not directly affect the query response. That is you can change them but, despite the change in query COST, they will not improve the time necessary to execute the query unless there is a plan change.
OPTIMIZER_MAX_PERMUTATIONS. For queries involving only a few tables, the optimizer can quickly scan all plans to determine the best ones. However as queries include more tables, the number of potential plans will rise exponentially. To stop Oracle spending all day looking at plans, this will tell Oracle to stop after a number of plans. This is obsolete in 10G, and there's no real reason to use it anywhere else. If you've set it really low, it is probably to reduce the amount of CPU time spent parsing, which means you are probably parsing too much.
OPTIMIZER_DYNAMIC_SAMPLING is simply too big for me to consider here. The 10G documentation for it can be found here
Table information
After the parameter values, we get to the tables.
I used the following script to prepare some dummy data;
-- Test fixture: two small tables keyed by a sequence number, with
-- day-name / month-name / spelt-out-day columns derived from a sliding
-- date, then 100%-sample statistics with 50-bucket histograms gathered
-- on all columns (and, via cascade, the PK indexes).
drop table test_a;
drop table test_b;
create table test_a (col_a number primary key, col_b varchar2(40), col_c varchar2(40), col_d varchar2(40));
create table test_b (col_a number primary key, col_b varchar2(40), col_c varchar2(40), col_d varchar2(40));
insert into test_a
select rownum, to_char(sysdate-rownum,'Dayfm'), to_char(sysdate-rownum,'Monthfm'), rtrim(to_char(sysdate-rownum,'ddspth'))
-- 200 rows, matching the "ORIG CDN: 200" figures quoted later in the
-- post. (The original predicate here was mangled by HTML stripping:
-- "where rownum <>" -- the "< 200"-style limit had been eaten as a tag.)
from all_tab_columns where rownum <= 200;
update test_a set col_b = null where col_b like 'Mon%';
insert into test_b
select rownum, to_char(sysdate+rownum,'Monthfm'), rtrim(to_char(sysdate+rownum,'ddspth')), to_char(sysdate+rownum,'Dayfm')
from all_tab_columns where rownum <= 200;
-- In TEST_B the day name lives in COL_D, hence the different column here.
update test_b set col_b = null where col_d like 'Mon%';
begin
dbms_stats.GATHER_TABLE_STATS(ownname => user, tabname => 'TEST_A',
ESTIMATE_PERCENT=>100,method_opt => 'FOR ALL COLUMNS SIZE 50', cascade => true);
dbms_stats.GATHER_TABLE_STATS(ownname => user, tabname => 'TEST_B',
ESTIMATE_PERCENT=>100,method_opt => 'FOR ALL COLUMNS SIZE 50', cascade => true);
end;
.
/
commit;
I started with the query
select * from test_a where col_b like 'Tue%' and col_c like 'J%'
At the table level, it looks at the cardinality (number of rows), number of blocks and the average row length. Then it shows the indexes,
***********************
Table stats Table: TEST_A Alias: TEST_A
TOTAL :: CDN: 200 NBLKS: 5 AVG_ROW_LEN: 32
-- Index stats
INDEX NAME: SYS_C00109029 COL#: 1
TOTAL :: LVLS: 0 #LB: 1 #DK: 200 LB/K: 1 DB/K: 1 CLUF: 2
For the indexes, it looks at the columns indexed, levels (blevel), number of leaf blocks, number of distinct keys, leaf blocks per key, datablocks per key and clustering factor. Clustering factor is basically how likely records with the same key values are in the same block. If they are clustered, picking up table rows based on the index will be cheaper as the table rows are more likely to be on a block that has already been read and so need less physical IO. In this case, the columns derived from the Month would be closely clustered together (a low cluster factor), with Days less clustered and the spelt out 'DD' least clustered (high cluster factor). In this case, the primary key has a low cluster factor reflecting the fact that the sequentially numbered rows are stored next to each other.
It then lists the 'interesting columns' (ie those used in joins or filter predicates), and shows the number of distinct values, number of nulls, density, low and high values and histogram information (if available).
Column: COL_B Col#: 2 Table: TEST_A Alias: TEST_A
NDV: 6 NULLS: 29 DENS: 2.9240e-03
FREQUENCY HISTOGRAM: #BKT: 171 #VAL: 6
Column: COL_C Col#: 3 Table: TEST_A Alias: TEST_A
NDV: 7 NULLS: 0 DENS: 2.5000e-03
FREQUENCY HISTOGRAM: #BKT: 200 #VAL: 7
The section SINGLE TABLE ACCESS PATH finishes with a determination of how many rows will get thrown back for the given filters on the table.
TABLE: TEST_A ORIG CDN: 200 ROUNDED CDN: 8 CMPTD CDN: 8
Access path: tsc Resc: 2 Resp: 2
BEST_CST: 2.00 PATH: 2 Degree: 1
A cardinality is derived based on the number of rows it thinks will match the given criteria. In this case it has estimated 8 out of two hundred. Actually around 1/7 will match 'Tuesday' and 2/7 will have a month starting with J (because this was run at the end of August) which will be about 8 rows.
SELECTIVITY and CARDINALITY hints
Two undocumented hints have become known in the Oracle developer community. Being undocumented, there is no definitive statement of what they do or how they work. The 10053 dump is one place where their effects can be directly observed in the line(s) of the SINGLE TABLE ACCESS PATH concerning table cardinality.
The selectivity replaces the filter factor percentages derived from the statistics with its own percentage.
Cardinality replaces the end result with its own absolute value, so not taking into account the actual size of the table.
The following are the adjusted queries and the respective table cardinality rows:
select /*+SELECTIVITY (a 0.5)*/ * from test_a a
where col_b like 'Tue%' and col_c like 'J%'
TABLE: TEST_A ORIG CDN: 200 ROUNDED CDN: 100 CMPTD CDN: 100
select /*+CARDINALITY (a 5)*/ * from test_a a
where col_b like 'Tue%' and col_c like 'J%'
TABLE: TEST_A ORIG CDN: 200 ROUNDED CDN: 5 CMPTD CDN: 5
Given a more complex join query :
select * from test_a a, test_b b
where a.col_b like 'Tue%'
and b.col_b not like 'J%'
and a.col_d=b.col_c
and a.col_b=b.col_d
It still uses the cardinality/selectivity hints based on the single table access path.
With a Selectivity of 0.5 on test_a, it will assume that half of test_a will be available for match up against the filtered rows from test_b. It does not imply or suggest that half the rows from test_a will match rows on test_b. In fact, the optimizer will apply its estimate of that percentage on top of the given selectivity.
With a cardinality of 5, it assumes 5 rows from test_a will be available for matching. Again, its own join filter percentage will be applied on top.
It is not correct to suggest the selectivity and cardinality hints supply additional information to the optimizer. In fact they override information derived from statistics which could actually mislead the optimizer. They should, like other hints, be used with caution and, like undocumented functionality, as rarely as possible.
This is as far as I have gone with 10053 dump files.
If it has whetted your appetite, then there is a more detailed paper by Wolfgang Breitling available here
Wednesday, August 24, 2005
When empty is not empty
To quote my previous blog entry
"I haven't found a query yet that runs slowly when all the tables are empty."
Actually, some empty tables may actually be quite big, and therefore slow. Just because a table has no rows in it, doesn't mean it doesn't have a lot of space allocated to it.
If the table did have a lot of rows which were deleted, the empty space is not reclaimed and a full scan of the table would necessitate reading all that space.
SQL> create table size_test (col_1 varchar2(2000));
Table created.
SQL>
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
----- ------
65536 8
SQL>
SQL> insert into size_test select rpad('A',2000,'B') from all_tab_columns;
41060 rows created.
SQL>
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
--------- ------
117440512 14336
SQL>
SQL> delete from size_test;
41060 rows deleted.
SQL>
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
--------- ------
117440512 14336
If the table is empty, a TRUNCATE will reclaim all that space.
If not, one way is to use ALTER TABLE ... MOVE;
Watch out though, as this will change the ROWIDs of all the rows in the table. Indexes will need to be rebuilt and materialized views fully refreshed.
SQL> alter table size_test move;
Table altered.
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
----- ------
65536 8
"I haven't found a query yet that runs slowly when all the tables are empty."
Actually, some empty tables may actually be quite big, and therefore slow. Just because a table has no rows in it, doesn't mean it doesn't have a lot of space allocated to it.
If the table did have a lot of rows which were deleted, the empty space is not reclaimed and a full scan of the table would necessitate reading all that space.
SQL> create table size_test (col_1 varchar2(2000));
Table created.
SQL>
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
----- ------
65536 8
SQL>
SQL> insert into size_test select rpad('A',2000,'B') from all_tab_columns;
41060 rows created.
SQL>
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
--------- ------
117440512 14336
SQL>
SQL> delete from size_test;
41060 rows deleted.
SQL>
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
--------- ------
117440512 14336
If the table is empty, a TRUNCATE will reclaim all that space.
If not, one way is to use ALTER TABLE ... MOVE;
Watch out though, as this will change the ROWIDs of all the rows in the table. Indexes will need to be rebuilt and materialized views fully refreshed.
SQL> alter table size_test move;
Table altered.
SQL> select bytes, blocks from user_segments
2 where segment_name = 'SIZE_TEST';
BYTES BLOCKS
----- ------
65536 8
Tuesday, August 23, 2005
My query is slow.... What we need before we can help
In a comment to another blog on "How to be a good Newbie" I expressed the view that my personal bugbear was a request for help which amounted to 'I have a performance problem with this SQL:' and gives an SQL with no explain plan, no table sizes, no indexes.....
Seeing another of these today, I looked for a simple checklist of what is needed to look at an SQL performance problem. I couldn't find one, so here's mine.
1. What are you running ? Show us the SQL.
2. What should it be doing ?
It may be obvious to you, but if you leave it up to the respondant to guess, you may get a less than optimal result.
3. Give us an Explain Plan. That will tell us what the Optimizer thinks it should be doing.
4. Give us the sizes of the tables. I haven't found a query yet that runs slowly when all the tables are empty.
5. What columns are indexed ?
6. If there are views involved, we need the information in all the tables in the view
7. Tell us if there are 'exotica' such as object types, database links, partitions....
8. How long is it taking ? How long do you want it to take ? A performance problem may exist in a query that only takes 30 seconds, if it is run often enough. On the other hand, you may be happy for the query to take hours.
9. First Rows or All Rows. Do you want the shortest time to see the first rows out of the result set, or the shortest time to get them all ?
The following can be run as anonymous PL/SQL with the table names listed in the collection in line 3. While this may report on some table columns which are irrelevant to the issue, it will give all the necessary facts for an initial examination of the issue.
-- Diagnostic report block: for each table name listed in t_tabs it resolves
-- synonyms to the underlying object, then prints (via DBMS_OUTPUT) either the
-- view text or the table, column and index statistics -- the facts needed for
-- an initial look at a SQL performance problem.
declare
-- Replace the placeholders with the tables referenced by the problem SQL.
t_tabs sys.DBMS_DEBUG_VC2COLL :=
sys.DBMS_DEBUG_VC2COLL('...table1....','...table2.....');
v_tab varchar2(100);
v_own varchar2(100);
v_typ varchar2(100);
v_user varchar2(100) := user;
v_long varchar2(32000);
v_ind number;
--
-- Resolve the object's type and owner, preferring the invoker's own
-- schema over PUBLIC (hence the DECODE in the ORDER BY).
cursor c_obj is
select object_type, owner
from all_objects
where object_name = v_tab
and owner in ('PUBLIC',v_own)
order by decode(owner,v_own,1,2);
--
-- Follow a synonym to its target owner/table.
cursor c_syn is
select table_owner, table_name
from all_synonyms
where synonym_name = v_tab
and owner in ('PUBLIC',v_own)
order by decode(owner,v_own,1,2);
--
-- View source text.
-- NOTE(review): ALL_VIEWS.TEXT is a LONG; the fetch into v_long will fail
-- for view definitions longer than 32000 characters -- confirm acceptable.
cursor c_vw is
select text from all_views
where owner = v_own and view_name = v_tab;
--
-- Table-level statistics.
cursor c_tab is
select table_name, num_rows, blocks, avg_row_len, sample_size, last_analyzed
from all_tables
where table_name = v_tab
and owner = v_own;
--
-- Column-level statistics. (low/high are selected but not printed below.)
cursor c_tcols is
select column_name, data_type, num_distinct, density, num_nulls, num_buckets,
last_analyzed, avg_col_len, rawtohex(low_value) low, rawtohex(high_value)
from all_tab_columns
where table_name = v_tab
and owner = v_own;
--
-- Index-level statistics.
cursor c_ind is
select index_name, index_type, leaf_blocks, distinct_keys,
Clustering_Factor, num_rows, last_analyzed
from all_indexes
where table_name = v_tab
and owner = v_own;
--
-- Columns of one index; the outer join picks up the expression for
-- function-based index columns where one exists.
cursor c_icols (p_iname varchar2) is
select c.index_name, c.column_position, c.column_name, e.column_expression
from all_ind_columns c, all_ind_expressions e
where e.index_owner(+) = c.index_owner
and e.index_name(+) = c.index_name
and e.column_position(+) = c.column_position
and c.table_name = v_tab
and c.table_owner = v_own
and c.index_name = p_iname
order by 1,2;
--
-- sp_out: print arbitrarily long text through DBMS_OUTPUT in chunks of at
-- most 248 characters. With p_wrap = 1 it tries to break at a newline,
-- then a space, then a '/'. On DBMS_OUTPUT buffer overflow (ORU-10027)
-- it discards the oldest 150 buffered lines and retries once.
PROCEDURE sp_out (p_text IN VARCHAR2, p_wrap IN NUMBER DEFAULT 1) IS
v_text VARCHAR2(32500);
v_point NUMBER;
e_overflow exception;
pragma exception_init (e_overflow, -20000);
v_temp_arr dbms_output.chararr;
v_lines number := 150;
BEGIN
v_text := p_text;
--Break it down into lengths up 248 characters in length
-- If p_wrap is 1, it will try to 'wrap' the line at word breaks
WHILE v_text IS NOT NULL LOOP
v_point := LENGTH(v_text)+1;
IF v_point > 248 AND p_wrap = 1 THEN
v_point := INSTR(SUBSTR(v_text,1,248),CHR(10),-1);
END IF;
IF v_point = 0 AND p_wrap = 1 THEN
v_point := INSTR(SUBSTR(v_text,1,248),' ',-1);
END IF;
IF v_point = 0 AND p_wrap = 1 THEN
v_point := INSTR(SUBSTR(v_text,1,248),'/',-1);
END IF;
IF v_point = 0 THEN
v_point := 248;
END IF;
BEGIN
DBMS_OUTPUT.PUT_LINE(SUBSTR(v_text,1,v_point-1));
EXCEPTION
WHEN e_overflow THEN
IF SUBSTR(SQLERRM,12,9) ='ORU-10027' THEN
--Clear out the oldest 150 lines to make room
--in the DBMS_OUTPUT buffer
DBMS_OUTPUT.GET_LINES(V_TEMP_ARR,V_LINES);
DBMS_OUTPUT.PUT_LINE(SUBSTR(v_text,1,v_point-1));
ELSE
RAISE;
END IF;
END;
v_text := SUBSTR(v_text,v_point+1);
END LOOP;
END;
--
begin
v_ind := t_tabs.first;
while v_ind is not null loop
v_tab := t_tabs(v_ind);
sp_out('TABLE:'||v_tab);
v_own := v_user;
-- Resolve the name; NO_DATA_FOUND if it is not visible to the invoker.
open c_obj;
fetch c_obj into v_typ, v_own;
if c_obj%notfound then
close c_obj;
raise no_data_found;
end if;
close c_obj;
--
-- Follow synonym chains until we reach a concrete object.
while v_typ = 'SYNONYM' loop
open c_syn;
fetch c_syn into v_own, v_tab;
close c_syn;
sp_out('SYN:'||v_typ||'/'||v_own||'/'||v_tab);
open c_obj;
fetch c_obj into v_typ, v_own;
close c_obj;
end loop;
--
sp_out(v_typ||'/'||v_own||'/'||v_tab);
if v_typ = 'VIEW' then
-- For a view, print its source text; the caller should then re-run
-- this block for the tables the view references.
open c_vw;
fetch c_vw into v_long;
close c_vw;
sp_out(v_long);
else
-- Table-level statistics.
for rec in c_tab loop
sp_out('CARD:'||rec.num_rows||
' BLKS:'||rec.blocks||
' AVGLEN:'||rec.avg_row_len||
' SMPL:'||rec.sample_size||
' ANAL:'||to_char(rec.last_analyzed,'DDMMYY'));
sp_out('----------------------------------------------');
end loop;
-- Column-level statistics.
for rec in c_tcols loop
sp_out('COL:'||rpad(rec.column_name,30)||
' TYP:'||rpad(rec.data_type,10)||
' VALS:'||to_char(rec.num_distinct,'999,999,990')||
' DENS:'||lpad(round(rec.density,4),8)||
' NULLS:'||to_char(rec.num_nulls,'999,999,990')||
' HIST:'||lpad(rec.num_buckets,4));
end loop;
sp_out('----------------------------------------------');
-- Index statistics plus each index's column list.
for rec in c_ind loop
sp_out('INAME:'||rpad(rec.index_name,30)||
' ITYP:'||rpad(rec.index_type,20)||
' LBLKS:'||to_char(rec.leaf_blocks,'9999,999')||
' KEYS:'||to_char(rec.distinct_keys,'999,999,990')||
' CLUSTR:'||round(rec.Clustering_Factor,4));
sp_out('..ROWS:'||to_char(rec.num_rows,'999,999,990')||
' ANAL:'||to_char(rec.last_analyzed,'DDMMYY'));
for rec2 in c_icols (rec.index_name) loop
v_long := rec2.column_expression;
if v_long is not null then
v_long := ' EXP:'||substr(v_long,1,150);
end if;
sp_out('....POS:'||to_char(rec2.column_position,'99')||
' COL:'||rpad(rec2.column_name,30)||v_long);
end loop;
end loop;
end if;
v_ind := t_tabs.next(v_ind);
end loop;
dbms_output.put_line('Done.');
end;
Seeing another of these today, I looked for a simple checklist of what is needed to look at an SQL performance problem. I couldn't find one, so here's mine.
1. What are you running ? Show us the SQL.
2. What should it be doing ?
It may be obvious to you, but if you leave it up to the respondant to guess, you may get a less than optimal result.
3. Give us an Explain Plan. That will tell us what the Optimizer thinks it should be doing.
4. Give us the sizes of the tables. I haven't found a query yet that runs slowly when all the tables are empty.
5. What columns are indexed ?
6. If there are views involved, we need the information in all the tables in the view
7. Tell us if there are 'exotica' such as object types, database links, partitions....
8. How long is it taking ? How long do you want it to take ? A performance problem may exist in a query that only takes 30 seconds, if it is run often enough. On the other hand, you may be happy for the query to take hours.
9. First Rows or All Rows. Do you want the shortest time to see the first rows out of the result set, or the shortest time to get them all ?
The following can be run as anonymous PL/SQL with the table names listed in the collection in line 3. While this may report on some table columns which are irrelevant to the issue, it will give all the necessary facts for an initial examination of the issue.
-- Diagnostic report block (repeated in the page): for each table name listed
-- in t_tabs it resolves synonyms to the underlying object, then prints (via
-- DBMS_OUTPUT) either the view text or the table, column and index
-- statistics -- the facts needed for an initial look at a SQL performance
-- problem.
declare
-- Replace the placeholders with the tables referenced by the problem SQL.
t_tabs sys.DBMS_DEBUG_VC2COLL :=
sys.DBMS_DEBUG_VC2COLL('...table1....','...table2.....');
v_tab varchar2(100);
v_own varchar2(100);
v_typ varchar2(100);
v_user varchar2(100) := user;
v_long varchar2(32000);
v_ind number;
--
-- Resolve the object's type and owner, preferring the invoker's own
-- schema over PUBLIC (hence the DECODE in the ORDER BY).
cursor c_obj is
select object_type, owner
from all_objects
where object_name = v_tab
and owner in ('PUBLIC',v_own)
order by decode(owner,v_own,1,2);
--
-- Follow a synonym to its target owner/table.
cursor c_syn is
select table_owner, table_name
from all_synonyms
where synonym_name = v_tab
and owner in ('PUBLIC',v_own)
order by decode(owner,v_own,1,2);
--
-- View source text.
-- NOTE(review): ALL_VIEWS.TEXT is a LONG; the fetch into v_long will fail
-- for view definitions longer than 32000 characters -- confirm acceptable.
cursor c_vw is
select text from all_views
where owner = v_own and view_name = v_tab;
--
-- Table-level statistics.
cursor c_tab is
select table_name, num_rows, blocks, avg_row_len, sample_size, last_analyzed
from all_tables
where table_name = v_tab
and owner = v_own;
--
-- Column-level statistics. (low/high are selected but not printed below.)
cursor c_tcols is
select column_name, data_type, num_distinct, density, num_nulls, num_buckets,
last_analyzed, avg_col_len, rawtohex(low_value) low, rawtohex(high_value)
from all_tab_columns
where table_name = v_tab
and owner = v_own;
--
-- Index-level statistics.
cursor c_ind is
select index_name, index_type, leaf_blocks, distinct_keys,
Clustering_Factor, num_rows, last_analyzed
from all_indexes
where table_name = v_tab
and owner = v_own;
--
-- Columns of one index; the outer join picks up the expression for
-- function-based index columns where one exists.
cursor c_icols (p_iname varchar2) is
select c.index_name, c.column_position, c.column_name, e.column_expression
from all_ind_columns c, all_ind_expressions e
where e.index_owner(+) = c.index_owner
and e.index_name(+) = c.index_name
and e.column_position(+) = c.column_position
and c.table_name = v_tab
and c.table_owner = v_own
and c.index_name = p_iname
order by 1,2;
--
-- sp_out: print arbitrarily long text through DBMS_OUTPUT in chunks of at
-- most 248 characters. With p_wrap = 1 it tries to break at a newline,
-- then a space, then a '/'. On DBMS_OUTPUT buffer overflow (ORU-10027)
-- it discards the oldest 150 buffered lines and retries once.
PROCEDURE sp_out (p_text IN VARCHAR2, p_wrap IN NUMBER DEFAULT 1) IS
v_text VARCHAR2(32500);
v_point NUMBER;
e_overflow exception;
pragma exception_init (e_overflow, -20000);
v_temp_arr dbms_output.chararr;
v_lines number := 150;
BEGIN
v_text := p_text;
--Break it down into lengths up 248 characters in length
-- If p_wrap is 1, it will try to 'wrap' the line at word breaks
WHILE v_text IS NOT NULL LOOP
v_point := LENGTH(v_text)+1;
IF v_point > 248 AND p_wrap = 1 THEN
v_point := INSTR(SUBSTR(v_text,1,248),CHR(10),-1);
END IF;
IF v_point = 0 AND p_wrap = 1 THEN
v_point := INSTR(SUBSTR(v_text,1,248),' ',-1);
END IF;
IF v_point = 0 AND p_wrap = 1 THEN
v_point := INSTR(SUBSTR(v_text,1,248),'/',-1);
END IF;
IF v_point = 0 THEN
v_point := 248;
END IF;
BEGIN
DBMS_OUTPUT.PUT_LINE(SUBSTR(v_text,1,v_point-1));
EXCEPTION
WHEN e_overflow THEN
IF SUBSTR(SQLERRM,12,9) ='ORU-10027' THEN
--Clear out the oldest 150 lines to make room
--in the DBMS_OUTPUT buffer
DBMS_OUTPUT.GET_LINES(V_TEMP_ARR,V_LINES);
DBMS_OUTPUT.PUT_LINE(SUBSTR(v_text,1,v_point-1));
ELSE
RAISE;
END IF;
END;
v_text := SUBSTR(v_text,v_point+1);
END LOOP;
END;
--
begin
v_ind := t_tabs.first;
while v_ind is not null loop
v_tab := t_tabs(v_ind);
sp_out('TABLE:'||v_tab);
v_own := v_user;
-- Resolve the name; NO_DATA_FOUND if it is not visible to the invoker.
open c_obj;
fetch c_obj into v_typ, v_own;
if c_obj%notfound then
close c_obj;
raise no_data_found;
end if;
close c_obj;
--
-- Follow synonym chains until we reach a concrete object.
while v_typ = 'SYNONYM' loop
open c_syn;
fetch c_syn into v_own, v_tab;
close c_syn;
sp_out('SYN:'||v_typ||'/'||v_own||'/'||v_tab);
open c_obj;
fetch c_obj into v_typ, v_own;
close c_obj;
end loop;
--
sp_out(v_typ||'/'||v_own||'/'||v_tab);
if v_typ = 'VIEW' then
-- For a view, print its source text; the caller should then re-run
-- this block for the tables the view references.
open c_vw;
fetch c_vw into v_long;
close c_vw;
sp_out(v_long);
else
-- Table-level statistics.
for rec in c_tab loop
sp_out('CARD:'||rec.num_rows||
' BLKS:'||rec.blocks||
' AVGLEN:'||rec.avg_row_len||
' SMPL:'||rec.sample_size||
' ANAL:'||to_char(rec.last_analyzed,'DDMMYY'));
sp_out('----------------------------------------------');
end loop;
-- Column-level statistics.
for rec in c_tcols loop
sp_out('COL:'||rpad(rec.column_name,30)||
' TYP:'||rpad(rec.data_type,10)||
' VALS:'||to_char(rec.num_distinct,'999,999,990')||
' DENS:'||lpad(round(rec.density,4),8)||
' NULLS:'||to_char(rec.num_nulls,'999,999,990')||
' HIST:'||lpad(rec.num_buckets,4));
end loop;
sp_out('----------------------------------------------');
-- Index statistics plus each index's column list.
for rec in c_ind loop
sp_out('INAME:'||rpad(rec.index_name,30)||
' ITYP:'||rpad(rec.index_type,20)||
' LBLKS:'||to_char(rec.leaf_blocks,'9999,999')||
' KEYS:'||to_char(rec.distinct_keys,'999,999,990')||
' CLUSTR:'||round(rec.Clustering_Factor,4));
sp_out('..ROWS:'||to_char(rec.num_rows,'999,999,990')||
' ANAL:'||to_char(rec.last_analyzed,'DDMMYY'));
for rec2 in c_icols (rec.index_name) loop
v_long := rec2.column_expression;
if v_long is not null then
v_long := ' EXP:'||substr(v_long,1,150);
end if;
sp_out('....POS:'||to_char(rec2.column_position,'99')||
' COL:'||rpad(rec2.column_name,30)||v_long);
end loop;
end loop;
end if;
v_ind := t_tabs.next(v_ind);
end loop;
dbms_output.put_line('Done.');
end;
Friday, August 19, 2005
Running DBMS_XPLAN against V$SQL_PLAN
If you haven't used it, DBMS_XPLAN gives a much more complete output from PLAN_TABLE that some of the older, more common queries used. V$SQL_PLAN contains the real and actual query plan being used for a query (rather than the one EXPLAIN might give for the same statement). Getting one to work with the other is tricky.
Tom Kyte's Effective Oracle by Design (and probably asktom.oracle.com) gives a mechanism for this, but it requires a view in a schema with an explicit grant on SYS.V_$SQL_PLAN.
If you've got a nice DBA who will set that up, great.
If not, but you still have access to V$SQL_PLAN, and privileges for creating types and functions, here's another mechanism.
First it creates a record type matching the plan_table, then a collection of that record type.
-- Object type mirroring the columns of a (9iR2) PLAN_TABLE, plus a nested
-- table of it, so that a function can return plan rows from V$SQL_PLAN in a
-- shape DBMS_XPLAN.DISPLAY can query via TABLE().
create or replace type rec_plan_table is object (
Statement_Id VARCHAR2(30) ,
Timestamp DATE ,
Remarks VARCHAR2(80) ,
Operation VARCHAR2(30) ,
Options VARCHAR2(30) ,
Object_Node VARCHAR2(128) ,
Object_Owner VARCHAR2(30) ,
Object_Name VARCHAR2(30) ,
Object_Instance NUMBER(38) ,
Object_Type VARCHAR2(30) ,
Optimizer VARCHAR2(255) ,
Search_Columns NUMBER(38) ,
Id NUMBER(38) ,
Parent_Id NUMBER(38) ,
Position NUMBER(38) ,
Cost NUMBER(38) ,
Cardinality NUMBER(38) ,
Bytes NUMBER(38) ,
Other_Tag VARCHAR2(255) ,
Partition_Start VARCHAR2(255) ,
Partition_Stop VARCHAR2(255) ,
Partition_Id NUMBER(38) ,
Other VARCHAR2(2000) ,
Distribution VARCHAR2(30) ,
Cpu_Cost NUMBER(38) ,
Io_Cost NUMBER(38) ,
Temp_Space NUMBER(38) ,
Access_Predicates VARCHAR2(4000) ,
Filter_Predicates VARCHAR2(4000) )
/
-- Collection type returned by f_pt (below) and queried with TABLE().
create or replace type tab_plan_table is table of rec_plan_table
/
Then there's a function which uses invoker rights and dynamic SQL to avoid the need for the explicit grant. This reads from v$sql_plan and returns the collection type. I've hardcoded the SCOTT user here. Replace that with the user owning the REC_PLAN_TYPE created above.
You can omit that if you are only ever going to run it from the user owning that object, but why restrict yourself ?
-- f_pt: read the execution plan rows for a given SQL hash_value from
-- V$SQL_PLAN and return them as a tab_plan_table collection, suitable for
-- feeding to DBMS_XPLAN.DISPLAY via TABLE(f_pt(:hash)).
-- AUTHID CURRENT_USER plus dynamic SQL avoid the need for an explicit
-- grant on SYS.V_$SQL_PLAN at compile time.
create or replace function f_pt (p_sql_hash in number) return tab_plan_table
authid current_user is
c_1 sys_refcursor;
v_return tab_plan_table;
begin
-- YOU WILL NEED TO PUT YOUR SCHEMA OWNER FOR THE REC_PLAN_TABLE OBJECT
-- IN PLACE OF SCOTT.
-- Each V$SQL_PLAN row is packed into a rec_plan_table object:
-- statement_id is hard-coded to 'STATEMENT' (the id later passed to
-- DBMS_XPLAN.DISPLAY) and remarks carries hash_value||'_'||child_number.
open c_1 for
'select '||
' SCOTT.rec_plan_table(''STATEMENT'', '||
' sysdate, to_char(hash_value)||''_''||child_number, '||
' operation, options, object_node, '||
' object_owner, object_name, 0 , '||
' '' '' , optimizer, search_columns, id, parent_id, position, '||
' cost, cardinality, bytes, other_tag, partition_start, '||
' partition_stop, partition_id, other, distribution, '||
' cpu_cost, io_cost, temp_space, access_predicates, '||
' filter_predicates) '||
' from v$sql_plan '||
' where hash_value = :b1 ' using p_sql_hash;
fetch c_1 bulk collect into v_return;
close c_1;
return v_return;
end;
.
/
You'll probably want to make public grants on the function and object type, and possibly a public synonym too.
You can then query as follows :
SELECT plan_table_output
from TABLE( dbms_xplan.display
( 'table(f_pt(2782527334)),dual','STATEMENT','serial' ) )
The number is the hash_value for the SQL owning the plan, which you can get from V$SQL.
The reason DUAL is in there is that I couldn't get it to work without it.
The query which DBMS_XPLAN runs behind the scenes involves a CONNECT BY. Unfortunately, as I tried with this very simple test, 9.2.0.4 wouldn't give me any rows back.
select column_value from table(sys.DBMS_DEBUG_VC2COLL('a','b','c'))
start with column_value = 'a'
connect by (1=1)
Adding the dual seems to materialize the results from the function call. It may not be needed in later patches or releases. I've only tested this in Oracle 9.2.0.4. I believe that it is definitely not usable pre-9i.
The TABLE_NAME parameter to DBMS_XPLAN.DISPLAY is size limited to 32 characters.
To get the hash_value in, plus the TABLE function and DUAL, the name of the function doing the work has to be 8 characters or less. In most cases, you won't be able to use a SCHEMA.F_PT (or even give F_PT a more meaningful name). As such, to make this available to multiple users, you'll want either synonyms or to use
ALTER SESSION SET CURRENT_SCHEMA....
Tom Kyte's Effective Oracle by Design (and probably asktom.oracle.com) gives a mechanism for this, but it requires a view in a schema with an explicit grant on SYS.V_$SQL_PLAN.
If you've got a nice DBA who will set that up, great.
If not, but you still have access to V$SQL_PLAN, and privileges for creating types and functions, here's another mechanism.
First it creates a record type matching the plan_table, then a collection of that record type.
-- Object type mirroring the columns of the Oracle PLAN_TABLE, so rows read
-- from V$SQL_PLAN can be presented to DBMS_XPLAN.DISPLAY as if they came
-- from a plan table. Attribute order defines the constructor signature used
-- in f_pt below, so it must not be changed.
create or replace type rec_plan_table is object (
Statement_Id VARCHAR2(30) ,
Timestamp DATE ,
Remarks VARCHAR2(80) ,
Operation VARCHAR2(30) ,
Options VARCHAR2(30) ,
Object_Node VARCHAR2(128) ,
Object_Owner VARCHAR2(30) ,
Object_Name VARCHAR2(30) ,
Object_Instance NUMBER(38) ,
Object_Type VARCHAR2(30) ,
Optimizer VARCHAR2(255) ,
Search_Columns NUMBER(38) ,
Id NUMBER(38) ,
Parent_Id NUMBER(38) ,
Position NUMBER(38) ,
Cost NUMBER(38) ,
Cardinality NUMBER(38) ,
Bytes NUMBER(38) ,
Other_Tag VARCHAR2(255) ,
Partition_Start VARCHAR2(255) ,
Partition_Stop VARCHAR2(255) ,
Partition_Id NUMBER(38) ,
Other VARCHAR2(2000) ,
Distribution VARCHAR2(30) ,
Cpu_Cost NUMBER(38) ,
Io_Cost NUMBER(38) ,
Temp_Space NUMBER(38) ,
Access_Predicates VARCHAR2(4000) ,
Filter_Predicates VARCHAR2(4000) )
/
-- Collection type over rec_plan_table: this is the return type of the f_pt
-- function, so its result can be queried via TABLE() in DBMS_XPLAN.DISPLAY.
create or replace type tab_plan_table is table of rec_plan_table
/
Then there's a function which uses invoker rights and dynamic SQL to avoid the need for an explicit grant. It reads from v$sql_plan and returns the collection type. I've hardcoded the SCOTT user here. Replace that with the user owning the REC_PLAN_TABLE type created above.
You can omit that if you are only ever going to run it from the user owning that object, but why restrict yourself ?
create or replace function f_pt (p_sql_hash in number) return tab_plan_table
authid current_user is
--
-- Returns the cached execution plan rows for a given SQL HASH_VALUE
-- (obtained from V$SQL) as a tab_plan_table collection, suitable for
-- feeding to DBMS_XPLAN.DISPLAY via TABLE().
-- Uses invoker rights plus dynamic SQL so that no explicit grant on
-- SYS.V_$SQL_PLAN is required for the function owner.
--
-- p_sql_hash : HASH_VALUE of the statement whose plan is wanted.
--
c_1 sys_refcursor;
v_return tab_plan_table;
begin
-- YOU WILL NEED TO PUT YOUR SCHEMA OWNER FOR THE REC_PLAN_TABLE OBJECT
-- IN PLACE OF SCOTT.
-- Statement_Id is built as hash_value_child_number so each child cursor's
-- plan gets a distinct id; Object_Instance and Remarks are dummied out.
open c_1 for
'select '||
' SCOTT.rec_plan_table(''STATEMENT'', '||
' sysdate, to_char(hash_value)||''_''||child_number, '||
' operation, options, object_node, '||
' object_owner, object_name, 0 , '||
' '' '' , optimizer, search_columns, id, parent_id, position, '||
' cost, cardinality, bytes, other_tag, partition_start, '||
' partition_stop, partition_id, other, distribution, '||
' cpu_cost, io_cost, temp_space, access_predicates, '||
' filter_predicates) '||
' from v$sql_plan '||
' where hash_value = :b1 ' using p_sql_hash;
fetch c_1 bulk collect into v_return;
close c_1;
return v_return;
exception
when others then
-- Don't leak the ref cursor if the fetch (or anything after the
-- open) raises; close it before re-raising the original error.
if c_1%isopen then
close c_1;
end if;
raise;
end;
.
/
You'll probably want to make public grants on the function and object type, and possibly a public synonym too.
You can then query as follows :
SELECT plan_table_output
from TABLE( dbms_xplan.display
( 'table(f_pt(2782527334)),dual','STATEMENT','serial' ) )
The number is the hash_value for the SQL owning the plan, which you can get from V$SQL.
The reason DUAL is in there is that I couldn't get it to work without it.
The query which DBMS_XPLAN runs behind the scenes involves a CONNECT BY. Unfortunately, as I tried with this very simple test, 9.2.0.4 wouldn't give me any rows back.
select column_value from table(sys.DBMS_DEBUG_VC2COLL('a','b','c'))
start with column_value = 'a'
connect by (1=1)
Adding the dual seems to materialize the results from the function call. It may not be needed in later patches or releases. I've only tested this in Oracle 9.2.0.4. I believe that it is definitely not usable pre-9i.
The TABLE_NAME parameter to DBMS_XPLAN.DISPLAY is size limited to 32 characters.
To get the hash_value in, plus the TABLE function and DUAL, the name of the function doing the work has to be 8 characters or less. In most cases, you won't be able to use a SCHEMA.F_PT (or even give F_PT a more meaningful name). As such, to make this available to multiple users, you'll want either synonyms or to use
ALTER SESSION SET CURRENT_SCHEMA....
Subscribe to:
Posts (Atom)