Query lock information
select a.inst_id,
a.process,
a.sid,
a.serial#,
a.event,
a.status,
a.program,
a.machine,
connect_by_isleaf as isleaf,--is this the lock source? 0 = no, 1 = yes
sys_connect_by_path(a.sid || '@' || a.inst_id, '<-') tree,--e.g. <-152@2<-153@2<-161@1 means session 161 on instance 1 is the source of the lock
level as tree_level
from gv$session a
start with a.blocking_session is not null
connect by (a.sid || '@' || a.inst_id) = prior
(a.blocking_session || '@' || a.blocking_instance);
How to kill the lock source:
1) Filter on isleaf and kill the lock source directly.
select 'alter system kill session ''' || sid || ',' || serial# || ',@' ||
       inst_id || ''' immediate;' db_kill_session
from (select a.inst_id,
a.process,
a.sid,
a.serial#,
a.event,
a.status,
a.program,
a.machine,
connect_by_isleaf as isleaf, --is this the lock source? 0 = no, 1 = yes
sys_connect_by_path(a.sid || '@' || a.inst_id, '<-') tree, --e.g. <-152@2<-153@2<-161@1 means session 161 on instance 1 is the source of the lock
level as tree_level
from gv$session a
start with a.blocking_session is not null
connect by (a.sid || '@' || a.inst_id) = prior
(a.blocking_session || '@' || a.blocking_instance))
where isleaf=1
order by tree_level asc;
select inst_id, 'kill -9 ' || spid os_kill_session
from (select p.inst_id,
p.spid,
a.sid,
a.serial#,
a.event,
a.status,
a.program,
a.machine,
connect_by_isleaf as isleaf, --is this the lock source? 0 = no, 1 = yes
sys_connect_by_path(a.sid || '@' || a.inst_id, '<-') tree, --e.g. <-152@2<-153@2<-161@1 means session 161 on instance 1 is the source of the lock
level as tree_level
from gv$session a, gv$process p
where a.inst_id = p.inst_id
and a.paddr = p.addr
start with a.blocking_session is not null
connect by (a.sid || '@' || a.inst_id) = prior
(a.blocking_session || '@' || a.blocking_instance))
where isleaf = 1
order by tree_level asc;
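For illustration, with hypothetical sid/serial#/instance/spid values, the first script emits statements of the form below, and the second its OS-level equivalent:
alter system kill session '161,3456,@1' immediate;
kill -9 12345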
2) Use final_blocking_instance and final_blocking_session in gv$session to locate the lock source:
select 'alter system kill session ''' || ss.sid || ',' || ss.serial# || ',@' ||
       ss.inst_id || ''' immediate;' db_kill_session
from gv$session s, gv$session ss
where s.final_blocking_session is not null
and s.final_blocking_instance = ss.inst_id
and s.final_blocking_session = ss.sid
and s.sid <> ss.sid;
select p.inst_id, 'kill -9 ' || p.spid os_kill_session
from gv$session s, gv$session ss, gv$process p
where s.final_blocking_session is not null
and s.final_blocking_instance = ss.inst_id
and s.final_blocking_session = ss.sid
and ss.paddr = p.addr
and ss.inst_id = p.inst_id
and s.sid <> ss.sid;
Compile application objects with caution during peak hours
The background wait event is library cache pin.
When a procedure is compiled, Oracle takes a library cache lock on the object's handle and then a pin on the object's heap, so that other processes cannot modify the object.
dba_kgllock
kgllkuse: address of the session holding the lock or pin
kgllkhdl: address of the object handle
When compiling an object blocks sessions:
select inst_id,sid,event,p1,p1text,p1raw,p2,p2text,p2raw from gv$session where wait_class<>'Idle';
From the library cache pin wait event, take the p1 handle address 000000...f408 and join the views to get the lock information:
select s.sid,s.sql_id,s.event,dk.* from dba_kgllock dk,v$session s where s.saddr=dk.kgllkuse and kgllkhdl='0000...f408';
The pin holder was not found above. kgllkreq is the lock mode the current session is requesting; kgllkmod is the lock mode currently held. Because this system is RAC and each node has its own memory structures, handle addresses cannot be shared across nodes, so the owner and object_name must be identified.
Sessions holding the pin on other nodes:
select addr,indx,inst_id,kglhdadr,kglnaown,kglnaobj from x$kglob where kglhdadr='0000...f408';
x$kglob is the view of library cache object handles.
On RAC node 2, find the handle address by object_name:
select addr,indx,inst_id,kglhdadr,kglnaown,kglnaobj from x$kglob where kglnaobj='xx';
Check the lock situation again:
select s.sid,s.sql_id,s.event,dk.* from dba_kgllock dk,v$session s where s.saddr=dk.kgllkuse and kgllkhdl='xx';
The kglhdadr found above shows a mode-2 shared pin blocking a mode-3 exclusive pin request.
Undo flashback query
select * from emp as of timestamp to_timestamp('2019-11-05 08:00:00','YYYY-MM-DD HH24:MI:SS');
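A minimal recovery sketch based on the same AS OF clause (assuming the lost data is still within undo retention; emp_bak is a hypothetical staging table):
create table emp_bak as
select * from emp as of timestamp to_timestamp('2019-11-05 08:00:00','YYYY-MM-DD HH24:MI:SS');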
LogMiner mining
1) Identify the archived logs covering the DML time window:
select t.thread#,t.sequence#,t.name from v$archived_log t where t.first_time>=to_date('2019-11-05 10:24:30','yyyy-mm-dd hh24:mi:ss')
and t.next_time<=to_date('2019-11-05 14:24:30','yyyy-mm-dd hh24:mi:ss');
2) Install the LogMiner package:
sql>@$ORACLE_HOME/rdbms/admin/dbmslm.sql
3) Add the logs to mine:
execute dbms_logmnr.add_logfile(logfilename=>'/...dbf',options=>dbms_logmnr.new);
Add more logs:
execute dbms_logmnr.add_logfile(logfilename=>'/...dbf',options=>dbms_logmnr.addfile);
The first log is added with the NEW option; subsequent ones use ADDFILE.
4) Start LogMiner:
execute dbms_logmnr.start_logmnr(Options=>dbms_logmnr.dict_from_online_catalog);
5) Query v$logmnr_contents for the mined data:
select sql_redo from v$logmnr_contents where seg_owner='SCOTT';
sql_redo records the DML statements as they were executed.
select sql_undo from v$logmnr_contents where seg_owner='SCOTT';
sql_undo contains the statements that reverse them.
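When the analysis is finished, close the LogMiner session; end_logmnr is the standard procedure in the same package:
execute dbms_logmnr.end_logmnr;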
Disabling DRM in Oracle 11g:
alter system set "_gc_policy_time"=0 scope=spfile;
Restart the instance for it to take effect.
To disable only the read-mostly locking / reader bypass mechanisms rather than DRM entirely:
alter system set "_gc_read_mostly_locking"=false scope=spfile;
alter system set "_gc_bypass_readers"=false scope=spfile;
row cache lock case:
DOC ID 1453425.1: slow queries against dba_extents
exec dbms_stats.gather_table_stats('SYS','X$KTFBUE');
Estimating Data Pump export file size
expdp \'/ as sysdba\' schemas=scott estimate_only=y
1) Make sure there is enough space for the export.
2) Check character set compatibility.
3) Dictionary statistics must be accurate; gather stats on the (physically stored) dictionary base tables:
exec dbms_stats.gather_dictionary_stats();
Gather stats on the dynamic (fixed) base tables:
exec dbms_stats.gather_fixed_objects_stats();
4) Lower versions are upward compatible; importing a higher-version dump into a lower version needs careful verification.
5) When importing into 12c and above, add transform=disable_archive_logging:Y so no archive logs are generated even in archivelog mode.
cluster=n is recommended (disable cluster mode).
1. Importing ZHS16GBK into UTF8
In UTF8, VARCHAR2 and CHAR columns need up to 1.5x the storage they need in ZHS16GBK, so import the table structure first, widen the columns, then import the data.
1) Import the table structure on the target:
impdp \'/ as sysdba\' directory=expdir content=metadata_only dumpfile=scott.dmp logfile=scott.log
2) Widen the VARCHAR2 and CHAR columns:
select 'alter table ' || owner || '.' || table_name || ' modify (' || column_name || ' ' || data_type || '(' || (case when data_length>=2660 then 4000 else
ceil(data_length*1.5) end) || '));'
from dba_tab_columns where owner='SCOTT' and data_type in ('VARCHAR2','CHAR');
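For a hypothetical column SCOTT.EMP.ENAME defined as VARCHAR2(100), the script would emit:
alter table SCOTT.EMP modify (ENAME VARCHAR2(150));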
3) Finally import the table data:
impdp \'/ as sysdba\' directory=expdir content=data_only dumpfile=scott.dmp logfile=scott.log
2. Exporting a subset of data
expdp \'/ as sysdba\' parfile=expdp.par
cat expdp.par
directory=expdir
parallel=4
CLUSTER=N
dumpfile=his_%U.dmp
logfile=his.log
schemas=HIS,MEDIA
query="where rownum<=5000"
Use sample=20 to export 20% of every table under the scott schema.
Export all of SCOTT's tables, but sample only 20% of the big table emp:
expdp \"/ as sysdba\" directory=expdir dumpfile=scott.dmp logfile=scott.log schemas=scott sample=scott.emp:20
3. Using SQLFILE to extract source-side DDL
For extreme cases where the source is damaged or cannot be queried, SQLFILE can be used.
During import on the target, SQLFILE can generate the tablespace creation statements:
impdp \"/ as sysdba\" directory=impdir dumpfile=dump.dmp full=y sqlfile=expdp_sqlfile.sql
Inspect the generated SQL file to locate the tablespace creation statements.
Object remapping
impdp \"/ as sysdba\" directory=impdir dumpfile=expdp.dmp remap_schema=jason:yh remap_tablespace=tps_jason:tps_yh
The target user and tablespace must be created in advance.
Importing a source partitioned table into an ordinary table on the target:
1) Export from the source.
2) On the target, pre-generate the CREATE TABLE statement with SQLFILE:
impdp \"/ as sysdba\" directory=impdir dumpfile=exp.dmp tables=tbs_objects sqlfile=tbs_objects.sql
Take the statement from the SQLFILE, strip the partition clauses, rename the table to tbs_objects_bak, and run it on the target to create the non-partitioned table.
3) Import the source partitioned table into the target non-partitioned table:
impdp \"/ as sysdba\" directory=impdir dumpfile=expdp.dmp tables=tbs_objects remap_table=tbs_objects:tbs_objects_bak partition_options=merge
Rolling upgrade with a transient logical DG
Case:
Linux 11.2.0.4 single instance + physical DG. First convert the physical DG temporarily into a logical DG,
then upgrade the logical standby; after the upgrade, switch it over to primary, switch the original primary to standby, and upgrade that too.
Steps:
1. Create a guaranteed restore point on the production database
2. Convert the physical DG to a logical DG
3. Upgrade the logical DG to 12c and use SQL Apply to catch up with production
4. First switchover: the logical DG becomes production and takes over the business
5. Flash the original production back to the initial restore point and shut it down
6. Start the original production in MOUNT with the 12c software, convert it to a physical standby, and resynchronize
Preparation on the production side:
1) Create a guaranteed restore point on the production database (flashback must already be enabled):
STARTUP MOUNT;
CREATE RESTORE POINT pre_upgrade guarantee flashback database;
In case a rollback is needed, back up the control file and the online redo logs.
To run SQL Apply, the primary must be in maximum availability or maximum performance mode.
alter database set standby database to maximize availability;
alter database open;
2) Enable supplemental logging on production:
alter database add supplemental log data (primary key, unique index) columns;
3) On the DG side, add an extra destination for the archives generated by the logical DG:
alter system set log_archive_dest_3='location=/oracle/arch valid_for=(standby_logfiles,standby_role)';
alter system set log_archive_dest_state_3=enable;
4) Check for tables with unsupported data types:
select * from dba_logstdby_eds_supported;
execute dbms_logstdby.eds_add_table(schema_name,table_name);
On production, record transactions not supported by SQL Apply into the DBA_LOGSTDBY_EVENTS table:
EXECUTE DBMS_LOGSTDBY.APPLY_SET('MAX_EVENTS_RECORDED',DBMS_LOGSTDBY.MAX_EVENTS);
EXECUTE DBMS_LOGSTDBY.APPLY_SET('RECORD_UNSUPPORTED_OPERATIONS','TRUE');
5) Build the data dictionary needed by SQL Apply.
Extract the dictionary information the standby needs from the primary:
EXECUTE DBMS_LOGSTDBY.BUILD;
LogMiner uses this dictionary information to convert redo into SQL for the logical DG.
Preparation on the standby side:
1) Create a restore point on the DG side:
STARTUP MOUNT;
CREATE RESTORE POINT pre_upgrade guarantee flashback database;
2) Convert the physical DG to a logical DG.
After confirming primary and standby are in sync, run on the standby:
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
ALTER DATABASE RECOVER TO LOGICAL STANDBY KEEP IDENTITY;
ADG must be closed and restarted in MOUNT; if the conversion hangs, enable real-time apply first, then shut down and retry. KEEP IDENTITY keeps the DBID unchanged and is new in 11g.
3) Disable automatic archive deletion on the logical DG:
SQL>EXECUTE DBMS_LOGSTDBY.APPLY_SET('LOG_AUTO_DELETE','FALSE');
4) Start SQL Apply on the logical DG:
ALTER DATABASE START LOGICAL STANDBY APPLY IMMEDIATE;
Standby redo logs are also required.
5) Install the 12c software on the DG host.
6) Shut down the database and stop the listener:
ALTER DATABASE STOP LOGICAL STANDBY APPLY;
SHUTDOWN IMMEDIATE;
LSNRCTL STOP;
Once the standby listener is down the primary cannot ship logs, so temporarily defer log transport on production:
ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2=DEFER;
Upgrading the logical DG to 12c
1) Run the preupgrade.jar pre-check.
export ORACLE_BASE=/oracle/ora11204   # run it with the 11g JDK
export ORACLE_HOME=$ORACLE_BASE/db_1
export ORACLE_SID=
shell>$old_oraclehome/jdk/bin/java -jar $new_oraclehome/rdbms/admin/preupgrade.jar file text dir=./precheck.log
It is advisable to remove the sec_case_sensitive_logon parameter setting.
Removing the EM component reduces upgrade downtime (the upgrade removes it automatically, but doing it manually beforehand is recommended):
Copy $ORACLE_HOME/rdbms/admin/emremove.sql from the 12c home into the 11g ORACLE_HOME.
shell>emctl stop dbconsole
set echo on ;
set serveroutput on;
@emremove.sql
Remove the OLAP catalog while still on 11g, using the 11g script:
$ORACLE_HOME/olap/admin/catnoamd.sql
execute dbms_stats.gather_dictionary_stats;
2) Gather dictionary statistics:
EXECUTE DBMS_STATS.GATHER_DICTIONARY_STATS;
EXECUTE DBMS_STATS.GATHER_FIXED_OBJECTS_STATS;
3) Confirm all materialized view refreshes have completed:
select o.name from sys.obj$ o,sys.user$ u,sys.sum$ s where o.type#=42 and bitand(s.mflags,8)=8;
4) Confirm no datafile needs media recovery and none is in backup mode:
SELECT * FROM V$RESTORE_FILE;
SELECT * FROM V$BACKUP WHERE STATUS!='NOT ACTIVE';
5) Handle distributed transactions:
SELECT * FROM DBA_2PC_PENDING;
If it returns rows, run:
SELECT LOCAL_TRAN_ID FROM DBA_2PC_PENDING;
EXECUTE DBMS_TRANSACTION.PURGE_LOST_DB_ENTRY('');
COMMIT;
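For example, with a hypothetical local transaction ID returned by the previous query:
EXECUTE DBMS_TRANSACTION.PURGE_LOST_DB_ENTRY('1.23.456');
COMMIT;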
6) Script out the DB links:
SELECT 'CREATE '||DECODE(U.NAME,'PUBLIC','public ')||'DATABASE LINK '||CHR(10)||DECODE(U.NAME,'PUBLIC',NULL,'SYS','',U.NAME||'.')|| L.NAME||CHR(10)
||'CONNECT TO ' ||L.USERID||' IDENTIFIED BY "'||L.PASSWORD||'" USING '''||L.HOST||''''||CHR(10)||';' TEXT FROM SYS.LINK$ L,SYS.USER$ U WHERE
L.OWNER#=U.USER#;
7) Purge the recycle bin:
PURGE DBA_RECYCLEBIN;
8) Save EM information:
shell>emdwgrd -save -sid tt -path /home/oracle
The saved EM information is needed when downgrading.
Remove the EM component.
9) Adjust the necessary parameters:
*.log_archive_dest_1='location=/oracle/ora11204/arch valid_for=(online_logfiles,all_roles)'
*.log_archive_dest_2='service=prod valid_for=(online_logfiles,primary_role)'
*.log_archive_dest_3='location=/oracle/ora11204/arch/std valid_for=(standby_logfiles,standby_role)'
mkdir -p /oracle/12c/admin/orcl/adump
10) Remove the OLAP component:
@?/olap/admin/catnoamd.sql
select distinct(trunc(last_refresh)) from dba_snapshot_refresh_times;
select s.obj#,o.obj#,s.containerobj#,lastrefreshdate,pflags,xpflags,o.name,o.owner#,bitand(s.mflags,8) from obj$ o,sum$ s where
o.obj#=s.obj# and o.type#=42 and bitand(s.mflags,8)=8;
11) Check users with the ADMINISTER DATABASE TRIGGER privilege; a user who created a database-level trigger must hold this privilege:
SELECT OWNER,TRIGGER_NAME FROM DBA_TRIGGERS WHERE BASE_OBJECT_TYPE='DATABASE' AND OWNER NOT IN (SELECT GRANTEE FROM DBA_SYS_PRIVS WHERE
PRIVILEGE='ADMINISTER DATABASE TRIGGER');
GRANT ADMINISTER DATABASE TRIGGER TO XXXX;
12) Upgrade APEX. Upgrading APEX in advance reduces upgrade time; in this case it was not upgraded in advance but together with the database.
MOS 1088970.1
13) Upgrade the database; start the logical DG with the 12c software:
STARTUP UPGRADE
shell>nohup dbupgrade -oracleHome /oracle/12c/product/12.2.0/dbhome_1 &
14) Reopen the database and recompile invalid objects:
create spfile from pfile;
startup
cd $ORACLE_HOME/rdbms/admin
$ORACLE_HOME/perl/bin/perl catcon.pl -n 1 -e -b utlrp -d '''.''' utlrp.sql
At this point every component in dba_registry goes from UPGRADED to VALID.
Compare against the invalid objects recorded before the upgrade:
select owner,object_name from dba_objects where status<>'VALID' and object_name not in (
select object_name from invalid_object_201911);
Run the fix-up script generated by the pre-check, which regathers fixed-object and dictionary statistics:
@/home/oracle/precheck.log/postupgrade_fixups.sql
15) Copy tnsnames.ora and the password file to the 12c environment.
First switchover
1) Start SQL Apply again on the logical DG to catch up:
ALTER DATABASE START LOGICAL STANDBY APPLY IMMEDIATE;
ALTER SESSION SET NLS_DATE_FORMAT='yy-mm-dd HH24:MI:SS';
SELECT SYSDATE,APPLIED_TIME FROM V$LOGSTDBY_PROGRESS;
2) Handle tables not supported by SQL Apply. Formally stop the application and make sure nothing is connected to production.
SELECT EVENT_TIMESTAMP,EVENT,STATUS FROM DBA_LOGSTDBY_EVENTS ORDER BY EVENT_TIMESTAMP;
If business tables hit errors such as ORA-16226 or ORA-16129, sync the affected objects to the logical DG with impdp:
impdp \"/ as sysdba\" network_link=xx tables=scott.emp table_exists_action=truncate
3) Switchover:
SELECT DATABASE_ROLE,SWITCHOVER_STATUS FROM V$DATABASE;
Check on both production and standby.
Then switch the primary to a logical standby:
ALTER DATABASE COMMIT TO SWITCHOVER TO LOGICAL STANDBY;
This command waits for in-flight transactions to complete.
After it completes, DATABASE_ROLE on both sides shows logical standby; now switch the original logical DG to primary:
ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY;
The original logical DG, now on 12c, serves the application.
Upgrading the original production database
1) Flash the original primary back to the restore point created at the start:
SHUTDOWN IMMEDIATE;
STARTUP MOUNT;
FLASHBACK DATABASE TO RESTORE POINT PRE_UPGRADE;
SHUTDOWN IMMEDIATE;
Copy the password file, listener.ora and tnsnames.ora from the original production $ORACLE_HOME/dbs into the new Oracle 12c home.
2) Prepare the parameter file.
Copy the relevant files from the current production (the former logical DG).
3) Mount the original production with the 12c software.
4) Convert the original production back to a physical standby:
ALTER DATABASE CONVERT TO PHYSICAL STANDBY;
SHUTDOWN IMMEDIATE;
The original production completes its own upgrade simply by applying the archive logs generated during the upgrade:
STARTUP MOUNT;
RECOVER MANAGED STANDBY DATABASE DISCONNECT;
Rollback if the upgrade fails
Simply flash back to the restore point created before the upgrade started:
STARTUP MOUNT
FLASHBACK DATABASE TO RESTORE POINT PRE_UPGRADE;
SHUTDOWN IMMEDIATE;
STARTUP MOUNT;
RECOVER MANAGED STANDBY DATABASE DISCONNECT;
XTTS migration and upgrade
Source: AIX 11.2.0.4; target: Linux x86 RAC 19.3
Prerequisites
1. The source cannot be Windows
2. The source database must be >= 10.2.0.3
3. The source compatible parameter cannot be higher than the target's
4. Character sets must match
5. db_block_size must match
6. The target db_files parameter must be at least the source's
7. The source must be in archivelog mode
8. In the source RMAN configuration, DEVICE TYPE DISK must not be COMPRESSED
9. The source RMAN configuration must use BACKUP TYPE TO BACKUPSET
10. The source RMAN default channel must not be SBT_TAPE
11. The datafiles of the migrated tablespaces must be online
12. Use the V3 scripts on 11.2.0.4; Linux is the preferred target OS
13. Backup pieces cannot be placed in ASM
14. The backup piece directory must be readable and writable
15. Exclude the system tablespaces to avoid conflicts, and check that the business tablespaces are self-contained
16. The source cannot be a physical or snapshot standby
If the final target database version is 11.2.0.3 or lower, an 11.2.0.4 instance must run on the target system to perform the incremental backup conversion.
Source-side preparation:
1)
The production database must be running in archivelog mode.
Version check:
if the source is 11.2.0.3, install p14192178_112030_Generic.zip to fix bug 14192178 (DOC ID 1459833.1).
2)
Collect database information.
1. Check the database time zone:
select dbtimezone from dual;
!date
2. Check the database character set:
select * from nls_database_parameters where parameter like '%CHARACTERSET%';
3. Check the patches on the target:
select 'opatch',comments from dba_registry_history;
4. Check the database components installed on the target:
select comp_name from dba_registry;
5. Check whether the source uses index compression:
select index_name,table_name from dba_indexes where compression='ENABLED';
select owner,table_name from dba_tables where iot_type is not null;
6. Check for duplicate datafile names on the source:
select substr(file_name,-6,2) from dba_data_files where tablespace_name='TBS_NAME' order by 1;
7. Check the source compatible parameter:
show parameter compatible
It must be higher than 10.2.
8. List the source tablespaces:
select distinct(tablespace_name) from dba_data_files;
9. Calculate the size of the datafiles to transport:
select d.file_id,d.tablespace_name,(select (sum(nb.bytes/1024/1024)) from dba_data_files nb
where nb.tablespace_name=d.tablespace_name) ts_size_m,
d.file_name,
(d.bytes/1024/1024) file_size_m,
(d.user_bytes/1024/1024) file_use_size_m from dba_data_files d
where d.tablespace_name not in ('SYSTEM','SYSAUX','UNDOTBS1','UNDOTBS2')
order by file_id;
10. List the users to migrate:
alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss';
select d.username,d.default_tablespace,d.temporary_tablespace,d.account_status,d.created from dba_users d
where d.account_status='OPEN' and d.username not like '%SYS%'
order by d.created desc;
select ''''||username||''',' from dba_users where username not like '%SYS%' and account_status not like 'EXPIRED%';
11. Collect user privileges:
drop table t_tmp_user_lhr;
create table t_tmp_user_lhr(id number,username varchar2(50),exec_sql varchar2(4000),create_type varchar2(20));
drop sequence s_t_tmp_user_lhr;
create sequence s_t_tmp_user_lhr;
sql>
begin
for cur in (select d.username,
d.default_tablespace,
d.account_status,
'create user ' || d.username || ' identified by ' ||
d.username || ' default tablespace ' ||
d.default_tablespace || ' temporary tablespace ' ||
d.temporary_tablespace || ';' CREATE_USER,
replace(to_char(dbms_metadata.get_ddl('USER',
D.USERNAME)),
chr(10),
'') create_user1
from dba_users d
where d.username in ('<business users>')) loop
insert into t_tmp_user_lhr
(id, username, exec_sql, create_type)
values
(s_t_tmp_user_lhr.nextval, cur.username, cur.create_user, 'USER');
INSERT INTO t_tmp_user_lhr
(id, username, exec_sql, create_type)
select s_t_tmp_user_lhr.nextval,
cur.username,
case
when d.admin_option = 'YES' then
'GRANT ' || d.privilege || ' TO ' || d.grantee ||
' with grant option ;'
else
'grant ' || d.privilege || ' TO ' || d.grantee || ';'
end priv,
'dba_sys_privs'
from dba_sys_privs d
where d.grantee = cur.username;
insert into t_tmp_user_lhr
(id, username, exec_sql, create_type)
select s_t_tmp_user_lhr.nextval,
cur.username,
case
when d.admin_option = 'YES' then
'grant ' || d.granted_role || ' TO ' || d.grantee ||
' with grant option;'
else
'grant ' || d.granted_role || ' TO ' || d.grantee || ';'
end priv,
'dba_role_privs'
from dba_role_privs d
where d.grantee = cur.username;
insert into t_tmp_user_lhr
(id, username, exec_sql, create_type)
select s_t_tmp_user_lhr.nextval,
cur.username,
case
when d.grantable = 'YES' then
'grant ' || d.privilege || ' ON ' || D.owner || '.' ||
d.table_name || ' to ' || d.grantee ||
' with grant option ;'
else
'grant ' || d.privilege || ' ON ' || D.owner || '.' ||
d.table_name || ' to ' || d.grantee || ';'
end priv,
'dba_tab_privs'
from dba_tab_privs d
where d.grantee = cur.username;
end loop;
commit;
end;
/
select * from t_tmp_user_lhr;
select id,username,create_type,exec_sql from t_tmp_user_lhr where create_type not in ('USER');
12. Measure per-user segment sizes:
select d.owner, sum(bytes/1024/1024) sizes_m from dba_segments d where d.owner in ('<business users>')
and not exists (select 1 from dba_recyclebin b where b.object_name=d.segment_name and d.owner=b.owner)
group by d.owner order by sum(bytes) desc;
13. Count user objects and their types:
select d.owner,count(1) from dba_objects d where d.owner in ('<business users>') and d.owner not in ('PUBLIC')
and not exists (select 1 from dba_recyclebin b where b.object_name=d.object_name and d.owner=b.owner)
group by d.owner order by count(1) desc;
select d.owner,d.object_type,count(1) from dba_objects d where d.owner in ('<business users>') and d.owner not in ('PUBLIC')
AND not exists (select 1 from dba_recyclebin b where b.object_name=d.object_name and d.owner=b.owner)
group by d.owner,d.object_type
order by d.owner,count(1) desc;
14. Save object information for later comparison with invalid objects:
select owner,object_name,object_type,status from dba_objects where status<>'VALID' and owner in ('<business users>');
select count(*) ,status from dba_objects where owner in ('<business users>') group by status;
Create the comparison table:
create table invalid_object_201911 as select * from dba_objects where status<>'VALID';
Check object status counts for the business users:
select count(*) ,status from dba_objects where owner in ('<business users>') group by status;
15. Check for unusable indexes:
select owner,index_name,status from dba_indexes where status='UNUSABLE' order by 1,2;
select i.owner,i.index_name,p.partition_name,p.status from dba_ind_partitions p,dba_indexes i
where p.index_name=i.index_name and p.status='UNUSABLE' ORDER BY 1,2,3;
select i.owner,i.index_name,s.subpartition_name,s.status from dba_ind_subpartitions s,dba_indexes i where s.index_name=i.index_name and s.status='UNUSABLE'
ORDER BY 1,2,3;
16. Check whether SYS or SYSTEM own business objects, and check for duplicate objects between SYS and SYSTEM:
select object_name,object_type from dba_objects where (object_name,object_type) in (select object_name,object_type from dba_objects where owner='SYS') and
owner='SYSTEM';
select owner,segment_name,segment_type,tablespace_name from dba_segments where tablespace_name in ('SYSTEM','SYSAUX') and owner in ('<business users>');
17. Determine the platform endianness:
select tp.platform_name,tp.endian_format from v$transportable_platform tp where tp.platform_name in ('Linux x86 64-bit','AIX-Based Systems (64-bit)');
18. Verify the tablespace set is self-contained:
execute sys.dbms_tts.transport_set_check('<tablespaces to migrate>',true);
select * from sys.transport_set_violations;
19. Generate the list of tablespaces to transport (a sample of the output follows the block):
set serveroutput on
sql>
declare
tsname varchar(30);
i number := 0;
begin
dbms_output.put('tablespaces=');
for ts in
(select tablespace_name from dba_tablespaces where tablespace_name not in ('SYSTEM','SYSAUX') and contents='PERMANENT'
order by tablespace_name)
loop
if (i!=0) then
dbms_output.put(tsname||',');
end if;
i := 1;
tsname := ts.tablespace_name;
end loop;
dbms_output.put_line(tsname);
dbms_output.put_line('');
end;
/
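With two hypothetical business tablespaces TBS_DATA and TBS_IDX, the block prints a single line ready for xtt.properties:
tablespaces=TBS_DATA,TBS_IDX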
20. Check advanced queues for 8.0 compatibility:
select owner,queue_table,recipients,compatible from dba_queue_tables where recipients='MULTIPLE' and compatible like '%8.0%';
21. Check XMLType objects based on XML schemas:
select distinct owner from dba_xml_schemas;
select distinct p.tablespace_name from dba_tablespaces p,dba_xml_schemas x,dba_users u,all_all_tables t
where t.table_name=x.table_name and t.tablespace_name=p.tablespace_name and x.owner=u.username;
22. Check Spatial component objects:
select owner,index_name from dba_indexes where ityp_name='SPATIAL_INDEX';
select owner,table_name,column_name from dba_tab_columns where data_type='SDO_GEOMETRY' and owner!='MDSYS' order by 1,2,3;
23. Check external tables:
select distinct owner from dba_external_tables;
24. Check index-organized tables:
select distinct owner from dba_tables where iot_type is not null;
25. Check temporary tables:
select owner,table_name from dba_tables where temporary='Y' and owner in ('<business users>');
26. Check materialized views:
select owner,count(*) from dba_mviews group by owner;
27. Check permanent tablespaces:
select t.tablespace_name tablespace_name,count(f.file_id),sum(f.bytes/1024/1024/1024) GB
from dba_tablespaces t,dba_data_files f where t.tablespace_name=f.tablespace_name and t.tablespace_name not in (<exclude as needed>)
and t.contents='PERMANENT' group by t.tablespace_name order by 2;
28. Check the recycle bin:
select count(*) from dba_recyclebin;
29. Check whether users have TSTZ (TIMESTAMP WITH TIME ZONE) columns:
select c.owner || '.' || c.table_name || '(' || c.column_name || ') -' || c.data_type || ' ' col
from dba_tab_cols c,dba_objects o
where c.data_type like '%WITH TIME ZONE' AND C.owner=o.owner and c.table_name=o.object_name and o.object_type='TABLE'
order by col;
30. Check whether tablespaces are encrypted:
select tablespace_name,encrypted from dba_tablespaces;
31. Check for encrypted columns:
select * from dba_encrypted_columns;
32. Check for opaque-type columns:
select distinct owner,data_type from dba_tab_columns where owner in ('<business users>');
33. Check tablespace and datafile status:
select tablespace_name,status from dba_tablespaces;
select status,online_status,count(*) from dba_data_files group by status,online_status;
34. Compare profiles between the old and new environments:
select distinct (t.pro) from (
select s.profile pro,l.profile pro2
from dba_profiles@ttslink s,dba_profiles l
where s.profile=l.profile(+)) t where t.pro2 is null order by t.pro;
3. Create a directory
Create a directory on the source pointing at the location of the source datafiles:
create directory sourcedir as '/dev';
grant all on directory sourcedir to public;
In this case the datafiles are raw devices, hence /dev.
4. Configure a shared folder with NFS
1) Target-side configuration.
Start NFS on Linux 6:
service nfs start
chkconfig nfs on
Start NFS on Linux 7:
systemctl start nfs.service
systemctl enable nfs.service
vi /etc/exports
/backup 130.36.21.89(rw,sync,all_squash)
shell>exportfs -a
2) Source-side configuration:
mount -o cio,rw,bg,hard,nointr,rsize=32768,wsize=32768,proto=tcp,noac,vers=3,timeo=600 132.151.59.16:/backup/bak /backup/bak
Now both source and target have the /backup/bak directory.
When mounting the target's NFS export on the source, the source IP must be allowed in the target's exports file; mounting onto a second-level directory is strongly recommended.
5. Stage the XTTS media
Upload rman-xttconvert_3.0.zip to /backup/bak on the production source.
6. Edit xtt.properties
tablespaces=<tablespace list> --tablespaces to migrate, comma separated, all on one line; can be generated with the SQL from step 19
platformid=6 --source platform id, from v$database.platform_id
srcdir=<directory holding the source datafiles>
dstdir=<final datafile directory on the target>
srclink=<DB link from the target back to the source>
dfcopydir=/backup/bak --source RMAN backup directory
backupformat=/backup/bak --source incremental backup directory
stageondest=/backup/bak --backup staging directory on the target
storageondest=+data --target RMAN restore destination
backupondest=+data --target incremental restore destination
asm_home=/u01/app/19.3/grid --target ASM home
asm_sid=+asm1 --target ASM instance name
7. Generate the tablespace creation statements needed on the target:
set heading off feedback off trimspool on linesize 500
spool tts_create_ts.sql
prompt /*============================== */
prompt /*create user tablespaces */
prompt /*===========================================*/
select 'create tablespace ' || tablespace_name || ' datafile ' ||'''+data/orajf/'||tablespace_name||'.dbf'''||' size 10m autoextend on;'
from dba_tablespaces where tablespace_name not in ('SYSTEM','SYSAUX','USERS') AND contents='PERMANENT';
prompt /*============================== */
prompt /*create user temporary tablespaces */
prompt /*===========================================*/
select 'create temporary tablespace ' || tablespace_name || ' tempfile ' ||'''+data/orajf/'||tablespace_name||'.dbf'''||' size 10m autoextend on;'
from dba_tablespaces where tablespace_name not in ('TEMP') AND contents='TEMPORARY';
spool off
Target environment configuration
1. Install the cluster and create the database.
In principle the database name and character set match production, and archiving is enabled on the target.
Keep the service_names parameter identical on source and target.
2. Create the target directory:
create directory destdir as '+data/orajf/';
grant all on directory destdir to public;
3. Create DATA_PUMP_DIR
Used for importing the metadata:
create or replace directory data_pump_dir as '/backup/bak';
grant all on directory data_pump_dir to public;
4. Create the DB link
cat /u01/app/oracle/product/19.3/db/network/admin/tnsnames.ora
xtts =
(description =
(address = (protocol = tcp)(host = 130.36.23.19)(port = 1521))
(connect_data =
(server = dedicated)
(service_name = orajf)
)
)
Create the DB link to the source database:
create public database link ttslink connect to system identified by zj130lt using 'xtts';
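A quick sanity check that the link resolves and the source is reachable:
select sysdate from dual@ttslink;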
5. Create the required tablespaces
Use the SQL generated above.
6. Import the production users and privileges on the target:
impdp \'/ as sysdba\' directory=data_pump_dir logfile=dp_userimp01.log
network_link=ttslink full=y include=user,role,role_grant,profile,function
After the import completes, drop the corresponding tablespaces. To be clear, this is done on the target; double-check before running.
set heading off feedback off trimspool on linesize 500
spool tts_drop_ts.sql
prompt /*============================== */
prompt /*drop user tablespaces */
prompt /*=====================================*/
select 'drop tablespace ' || tablespace_name || ' including contents and datafiles;' from dba_tablespaces
where tablespace_name not in ('SYSTEM','SYSAUX','USERS') AND contents='PERMANENT';
prompt /*============================== */
prompt /*drop user temporary tablespaces */
prompt /*=====================================*/
select 'drop tablespace ' || tablespace_name || ' including contents and datafiles;' from dba_tablespaces
where tablespace_name not in ('TEMP') AND contents='TEMPORARY';
spool off
7. Drop the USERS tablespace on the target
So that the source USERS tablespace can be transported, drop the target's USERS tablespace first:
alter database default tablespace sysaux;
drop tablespace users including contents and datafiles;
8. Set tmpdir
On the source production: export tmpdir=/opt/oracle/tmp
On the new target: export tmpdir=/home/oracle/tmp
Data synchronization
1. Full data transport phase
Method 1: dbms_file_transfer; method 2: RMAN backup.
Enable block change tracking so the incremental phase runs faster:
alter database enable block change tracking using file '/rman/trace.log';
select status from v$block_change_tracking;
1) Generate the files needed for the incrementals:
export tmpdir=/opt/oracle/tmp
export xttdebug=1
$ORACLE_HOME/perl/bin/perl xttdriver.pl -S
xttplan.txt
xttnewdatafiles.txt
Back up the newly generated files beforehand.
2) Transport the files via manual RMAN copies. Run the query below on the source and copy the source datafiles to the NFS share /backup/bak:
select 'copy datafile '||''''||file_name||''''||' to '||''''||'/backup/bak/'||substr(file_name,instr(file_name,'/',-1)+1)||'.dbf'''||';' from
dba_data_files where tablespace_name not in ('SYSTEM','SYSAUX','UNDOTBS1');
3) Fix the datafile permissions manually on the target:
chown grid.oinstall *.dbf
The query runs on the source; the convert runs on the target.
select 'convert from platform '||''''||'AIX-Based Systems (64-bit)'||''''||'
parallelism 10'||' datafile '||''''||'/backup/bak/'||substr(name,instr(name,'/',-1)+1)|| '.dbf'||''''||' format '||''''||'+data/orajf/'||substr(name,instr(name,'/',-1)+1)||'.dbf'''||';' from v$datafile;
Use this statement instead:
select 'convert from platform ' || '''' || 'AIX-Based Systems (64-bit)' || '''' || '
parallelism 10' || ' datafile ' || '''' || '/backup/bak/' ||
substr(name, instr(name, '/', -1) + 1) || '' || '''' || ' format ' || '''' ||
'+data/orajf/' || substr(name, instr(name, '/', -1) + 1) || '''' || ';'
from v$datafile;
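For a hypothetical copied file users01.dbf staged under /backup/bak, the generated RMAN command has this shape:
convert from platform 'AIX-Based Systems (64-bit)' parallelism 10 datafile '/backup/bak/users01.dbf' format '+data/orajf/users01.dbf';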
2. Incremental restore
1) On the source.
Check the incremental backup path.
Run the incremental backup on the source:
export tmpdir=/opt/oracle/tmp
export xttdebug=1
$ORACLE_HOME/perl/bin/perl xttdriver.pl -i   (run in the background with nohup)
Copy or replace the following source files onto the target:
source: /opt/oracle/tmp
target: /home/oracle/tmp/
xttnewdatafiles.txt,xttplan.txt,tsbkupmap.txt
2) Roll forward on the target.
Fix the permissions on the incremental backup pieces:
chown grid.oinstall *_1_1
chmod 775 *_1_1
Apply the incrementals on the target:
export tmpdir=/home/oracle/tmp
export xttdebug=1
$ORACLE_HOME/perl/bin/perl xttdriver.pl -r
Check the datafile timestamps to confirm the incremental applied successfully.
3) On the source, determine the FROM SCN for the next incremental:
export tmpdir=/opt/oracle/tmp
export xttdebug=1
$ORACLE_HOME/perl/bin/perl xttdriver.pl -s
Keep cycling through incremental backup and apply.
Final migration
1. Last incremental
1) Set the source tablespaces read only; run on production:
set heading off feedback off trimspool on linesize 500
spool tts_tsro.sql
prompt /*============================== */
prompt /*mark all user tablespaces read only */
prompt /*============================== */
select 'alter tablespace ' ||tablespace_name ||' read only;'
from dba_tablespaces where tablespace_name not in ('SYSTEM','SYSAUX','UNDOTBS1') and contents='PERMANENT';
spool off
Run the generated commands on the source.
Verify the tablespaces are read only:
select tablespace_name,status,contents from dba_tablespaces;
2) Stop the application and take the final incremental backup; on the source:
export tmpdir=/opt/oracle/tmp
export xttdebug=1
$ORACLE_HOME/perl/bin/perl xttdriver.pl -i
After the last incremental, disable block change tracking:
alter database disable block change tracking;
Copy the files:
source directory: /opt/oracle/tmp
target directory: /home/oracle/tmp
xttnewdatafiles.txt
xttplan.txt
tsbkupmap.txt
3) Final roll-forward on the target; fix the permissions:
chown grid.oinstall *_1_1
chmod 766 *_1_1
Apply the incrementals on the target:
export tmpdir=/home/oracle/tmp
export xttdebug=1
$ORACLE_HOME/perl/bin/perl xttdriver.pl -r
2. Metadata synchronization
1) Check whether production added any datafiles:
select name,creation_time from v$datafile order by creation_time asc;
2) Synchronize the metadata.
On the target, generate the metadata import command file xttplugin.txt:
export tmpdir=/home/oracle/tmp
export xttdebug=1
$ORACLE_HOME/perl/bin/perl xttdriver.pl -e
Back up the generated xttplugin.txt and customize it.
The author's modifications:
directory=data_pump_dir
exclude=STATISTICS
logfile=...
network_link=ttslink
The transport_tablespaces and transport_datafiles values can be generated with the two scripts below.
Command to build transport_tablespaces:
set serveroutput on
declare
tsname varchar(30);
i number := 0;
begin
dbms_output.put('transport_tablespaces=');
for ts in
(select tablespace_name from dba_tablespaces where tablespace_name not in ('SYSTEM','SYSAUX') and contents='PERMANENT'
order by tablespace_name)
loop
if (i!=0) then
dbms_output.put(tsname||',');
end if;
i := 1;
tsname := ts.tablespace_name;
end loop;
dbms_output.put_line(tsname);
dbms_output.put_line('');
end;
/
Command to build transport_datafiles:
set serveroutput on
declare
fname varchar(513);
i number := 0;
begin
dbms_output.put('transport_datafiles=');
for df in
(select file_name from dba_tablespaces a,dba_data_files b where a.tablespace_name=b.tablespace_name and
a.tablespace_name not in ('SYSTEM','SYSAUX') and contents='PERMANENT'
order by a.tablespace_name)
loop
if (i!=0) then
dbms_output.put_line('''+data/zwhis/'||substr(fname,instr(fname,'/',-1)+1)||''', ');
end if;
i := 1;
fname := df.file_name;
end loop;
dbms_output.put_line('''+data/zwhis/'||substr(fname,instr(fname,'/',-1)+1)||'''');
dbms_output.put_line('');
end;
/
Run the edited script on the target; when it finishes, check the tablespace and datafile status:
select tablespace_name,status,file_name from dba_data_files;
select name,status from v$datafile;
3) Validate the transported data for corrupt blocks:
rman>validate tablespace <transported tablespaces> check logical;
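With hypothetical tablespace names, the command looks like:
rman>validate tablespace TBS_DATA, TBS_IDX check logical;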
4) Set the target tablespaces read write:
select 'alter tablespace '||tablespace_name|| ' read write;' from dba_tablespaces
where tablespace_name not in ('SYSTEM','SYSAUX','UNDOTBS1')
AND contents='PERMANENT';
Finally check the tablespace status:
select tablespace_name,status,contents from dba_tablespaces;
5) Synchronize temporary tables.
Temporary table data is not synchronized when metadata is imported via impdp; the temporary tables must be created manually in the target database.
Get the creation DDL from production:
select 'select dbms_metadata.get_ddl(' || ''''||'TABLE'||''''||','||''''||table_name||''''||','||''''||owner||''''||') from dual;'
from dba_tables where owner in ('<business users>') and temporary='Y';
6) Import the source privileges.
1. Using the script table:
select exec_sql from t_tmp_user_lhr where create_type not in ('USER');
2. Using impdp:
impdp \'/ as sysdba\' directory=data_pump_dir logfile=dp_userimp01.log network_link=ttslink full=y include=GRANT
7) Import the DB link objects:
impdp \'/ as sysdba\' directory=data_pump_dir logfile=dp_userimp03.log network_link=ttslink full=y include=DB_LINK
For DB links missing under the SYS user, handle them as follows:
select dbms_metadata.get_ddl('DB_LINK','DBLINK_NAME','SYS') FROM DUAL;
8) Import the remaining objects:
impdp \'/ as sysdba\' directory=data_pump_dir logfile=dp_userimp02.log network_link=ttslink full=y include=index,function,view,synonym,package_body,sequence,
package,job,trigger,procedure,type
9) Recompile invalid objects:
SQL>@?/rdbms/admin/utlrp.sql
10) Reset the default tablespace:
alter database default tablespace users;
11) Import the statistics onto the target:
impdp \'/ as sysdba\' directory=data_pump_dir logfile=dp_userimp01.log network_link=ttslink full=y include=STATISTICS
3. Data comparison
1) Compare object counts:
select d.owner,count(1) from dba_objects d
where d.owner in ('<business users>') and d.owner not in ('PUBLIC') AND NOT EXISTS (SELECT 1 FROM dba_recyclebin b where b.object_name=d.object_name and d.owner=b.owner)
group by d.owner order by owner desc;
2) Compare object types:
select d.owner,d.object_type,count(1) from dba_objects d where d.owner in ('<business users>') and NOT EXISTS (SELECT 1 FROM dba_recyclebin b where b.object_name=d.object_name and d.owner=b.owner)
group by d.owner,d.object_type order by d.owner,count(1) desc;
3) Verify account privileges and synonyms:
select synonym_name from dba_synonyms where owner in ('<business users>');
4) Check the datafile header status:
select status,error,tablespace_name from v$datafile_header;
5) Tablespace verification.
Confirm each owner's DEFAULT_TABLESPACE and TEMPORARY_TABLESPACE and every user's quota on the relevant tablespaces, and reset the owner users' default tablespaces, created earlier, to the correct values.
Comparison checklist
Test data comparison
Tablespace counts for the migration list
Schema counts
Object counts
Table row counts
Status of packages, functions, procedures, indexes, etc.
Privileges
Synonyms
Temporary table counts
Rollback if the migration fails
Simply set the original production tablespaces back to read write, restore the source job processes to their original value, and start the source listener.
To avoid jobs firing during migration, disable jobs on both sides:
alter system set job_queue_processes=0 scope=both sid='*';
Re-enable afterwards:
alter system set job_queue_processes=<original value> scope=both sid='*';
OGG migration and upgrade case
10.2.0.5 RAC on AIX --> 11.2.0.4 RAC on x86, with one hour of downtime.
1. Prepare the environments
2. Set up OGG and synchronize the data
3. Configure the comparison jobs and run the first comparison
4. Final migration
Source environment preparation
As with the earlier data-gathering scripts, list the objects to synchronize and the information needed to build the target (character set, tablespaces, and so on), and collect the source user privileges. On top of that, enable supplemental logging and force logging on the source database.
1. Enable database supplemental logging:
select supplemental_log_data_min from v$database;
alter database add supplemental log data;
2. Enable database force logging:
select force_logging from v$database;
alter database force logging;
3. Set the OGG parameter.
From 11.2.0.4 onwards, a new parameter must be enabled to use OGG:
alter system set enable_goldengate_replication=true scope=both;
4. Create the OGG admin user:
create tablespace odc_tps datafile '+data/orcl/odc01.dbf' size 100m autoextend on ;
create user odc identified by odc default tablespace odc_tps;
grant connect to odc;
grant alter any table to odc;
grant alter session to odc;
grant create session to odc;
grant flashback any table to odc;
grant select any dictionary to odc;
grant select any table to odc;
grant resource to odc;
grant dba to odc;
After installation, DBA can be revoked, but UNLIMITED TABLESPACE must then be granted:
grant unlimited tablespace to odc;
alter user odc quota unlimited on odc_tps;
When building the OGG user on an 11.2.0.4 or later database, you can use DBMS_GOLDENGATE_AUTH.GRANT_ADMIN_PRIVILEGE.
In a multitenant source, grant with dbms_goldengate_auth.grant_admin_privilege('C##GGADMIN',container=>'all'); it is also suggested to grant dba to c##ogg container=all;
exec dbms_goldengate_auth.grant_admin_privilege('ogg','*',TRUE);
http://otn.oracle.com
https://edelivery.oracle.com
3) Configure the OGG environment variables.
On AIX this is LIBPATH; on HP-UX IA64 it is SHLIB_PATH.
On Linux:
export LD_LIBRARY_PATH=/odc/oggsoft:$ORACLE_HOME/lib:$LD_LIBRARY_PATH
4) Create the subdirectories:
./ggsci
ggsci>create subdirs
5) Create and start the MGR process:
GGSCI>edit param mgr
port 7809
dynamicportlist 7800-7810
purgeoldextracts ./dirdat/bt*,usecheckpoints,minkeephours 24
autorestart extract * retries 10 waitminutes 10
GGSCI>START MGR
6. Create and start the extract (capture) process.
The extract must be started before data initialization, and you must confirm it captures data changes correctly.
1) Add table-level supplemental logging; log in to the database as the OGG user:
GGSCI>dblogin userid odc password odc
add trandata hr.*
info trandata hr.*
This adds supplemental logging to every table under user HR.
2) Create the extract process:
add extract e_hr,tranlog,begin now
add exttrail ./dirdat/bt,extract e_hr --bt: the trail file prefix is bt
3) Configure the extract parameter file:
GGSCI>edit param e_hr
extract e_hr
userid odc,password odc
setenv (ORACLE_HOME="/u01/....11.2.0/db_1")
setenv (ORACLE_SID="orcl")
tranlogoptions dblogreader --with DBLOGREADER the extract uses the newer API available in 11.2.0.2 and later
exttrail ./dirdat/bt
table hr.*;
4) Start or stop the extract:
GGSCI>start extract e_hr
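To confirm the extract is running and capturing changes (standard GGSCI commands):
GGSCI>info extract e_hr, detail
GGSCI>stats extract e_hr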
7. Create and start the pump process:
GGSCI>add extract p_hr,exttrailsource ./dirdat/bt --use the source trail files with prefix bt as the data source
add rmttrail ./dirdat/rt,extract p_hr --create a trail with prefix rt in the specified directory on the target
2) Configure the pump parameter file:
extract p_hr
passthru
rmthost 192.168.238.57,mgrport 7809
rmttrail ./dirdat/rt
table hr.*;
GGSCI>start extract p_hr
Target environment preparation
1. Data initialization:
expdp "'"/ as sysdba"'" directory=exports dumpfile="expdp_%U.dmp" logfile="exp.log" parallel=4 schemas=hr flashback_scn=....
Import command:
impdp "'"/ as sysdba"'" directory=imports dumpfile="expdp_%U.dmp" logfile="imp.log" parallel=4 schemas=hr
Export the data as needed, then import it on the target to complete the initialization.
Initialization methods:
1. RMAN: recommended for same-platform initialization
2. expdp: heavier impact on the primary; supports 10g and later databases
3. OGG initial load: works across database platforms, e.g. SQL Server to Oracle
2. Create and start the replicat process:
GGSCI>dblogin userid odc password odc
add checkpointtable
add replicat r_hr,exttrail ./dirdat/rt
Configure the replicat parameter file:
replicat r_hr
setenv (ORACLE_HOME="/u01/....db_1")
setenv (ORACLE_SID="orcl")
userid odc,password odc
assumetargetdefs
map hr.*,target hr.*;
In older OGG versions, assumetargetdefs is used when both sides have identical table structures; if the metadata differs, replace it with a sourcedefs file that supports mapping.
start replicat r_hr [, aftercsn <csn> | atcsn <csn>]
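For example, if the initialization export used flashback_scn 123456789 (a hypothetical value), start the replicat just past that point:
GGSCI>start replicat r_hr, aftercsn 123456789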
Data comparison installation and configuration
Veridata 12c (12.2.1.4); steps:
1. Install JDK 1.8 or later
2. Install Oracle WebLogic Server 12c
3. Install a supported repository database (Oracle or SQL Server) for Veridata
4. Install the Veridata server
5. Install the OGG Veridata agent
6. Create the OGG Veridata repository with RCU
7. Create the WebLogic Server domain with the configuration wizard
8. Start the OGG Veridata server
9. Start the OGG Veridata agent
https://docs.oracle.com/en/middleware/goldengate/veridata/12.2.1.4/gvdis/index.html
Veridata configuration steps:
1) Create the database connections
2) Create groups
3) Configure compare pairs
4) Create a job
5) Run the job
6) Review the report
OGG checks
info all
info <process name>
3. Object check
Check that objects match between the old and new environments:
select d.owner,d.object_type,count(*) from dba_objects@old d
where d.owner in () and d.object_type not in ('TRIGGER','JOB','VIEW','LOB')
AND object_name not like 'MLOG%' AND NOT EXISTS (SELECT *
FROM dba_recyclebin@old b where b.object_name=d.object_name and d.owner=b.owner)
group by d.owner,d.object_type
minus
select d.owner,d.object_type,count(*) from dba_objects d
where d.owner in () and d.object_type not in ('TRIGGER','JOB','VIEW','LOB')
AND object_name not like 'MLOG%' AND NOT EXISTS (SELECT *
FROM dba_recyclebin b where b.object_name=d.object_name and d.owner=b.owner)
group by d.owner,d.object_type
4. Index check
select table_owner,table_name,count(*) from dba_indexes@old where owner in () group by table_owner,table_name
minus
select table_owner,table_name,count(*) from dba_indexes where owner in () group by table_owner,table_name
Drill into the indexes of specific tables:
select index_owner,index_name,table_owner,table_name,listagg(to_char(column_name),',') within group(order by index_name) as
full_column
from dba_ind_columns@old
where table_owner='' and table_name=''
group by index_owner,index_name,table_owner,table_name
minus
select index_owner,index_name,table_owner,table_name,listagg(to_char(column_name),',') within group(order by index_name) as
full_column
from dba_ind_columns
where table_owner='' and table_name=''
group by index_owner,index_name,table_owner,table_name
5. View check
select v.owner,v.view_name,b.status from dba_views@old v,dba_objects@old b
where v.owner in () and v.view_name=b.object_name
minus
select v.owner,v.view_name,b.status from dba_views v,dba_objects b
where v.owner in () and v.view_name=b.object_name
If views are missing, get the DDL from the source and rebuild them on the target:
select sys.dbms_metadata.get_ddl('VIEW','<view name>','<owner>') from dual;
6. DB link check
select owner,object_name,status from dba_objects@old where object_type='DATABASE LINK'
minus
select owner,object_name,status from dba_objects where object_type='DATABASE LINK'
If DB links are missing, re-export and re-import to repair the differences:
expdp "'"/ as sysdba"'" include=db_link directory=..dumpfile=...dmp logfile=...full=y
impdp "'"/ as sysdba"'" directory= dumpfile= logfile=...log
7. Function check
select distinct s.name,s.owner,b.status from dba_source@old s,dba_objects@old b
where s.name=b.object_name and b.object_type='FUNCTION' and s.owner in ()
minus
select distinct s.name,s.owner,b.status from dba_source s,dba_objects b
where s.name=b.object_name and b.object_type='FUNCTION' and s.owner in ()
8. Procedure check
select distinct s.name,s.owner,b.status from dba_source@old s,dba_objects@old b
where s.name=b.object_name and b.object_type='PROCEDURE' and s.owner in ()
minus
select distinct s.name,s.owner,b.status from dba_source s,dba_objects b
where s.name=b.object_name and b.object_type='PROCEDURE' and s.owner in ()
9. Package check
select distinct s.name,s.owner,b.status from dba_source@old s,dba_objects@old b
where s.name=b.object_name and b.object_type='PACKAGE' and s.owner in ()
minus
select distinct s.name,s.owner,b.status from dba_source s,dba_objects b
where s.name=b.object_name and b.object_type='PACKAGE' and s.owner in ()
select distinct s.name,s.owner,b.status from dba_source@old s,dba_objects@old b
where s.name=b.object_name and b.object_type='PACKAGE BODY' and s.owner in ()
minus
select distinct s.name,s.owner,b.status from dba_source s,dba_objects b
where s.name=b.object_name and b.object_type='PACKAGE BODY' and s.owner in ()
SELECT sys.dbms_metadata.get_ddl('PACKAGE','<object name>','<owner>') from dual;
SELECT sys.dbms_metadata.get_ddl('PACKAGE BODY','<object name>','<owner>') from dual;
10. Synonym check
select s.*,b.status from dba_synonyms@old s,dba_objects@old b
where s.synonym_name=b.object_name and b.object_type='SYNONYM' and s.owner in ()
minus
select s.*,b.status from dba_synonyms s,dba_objects b
where s.synonym_name=b.object_name and b.object_type='SYNONYM' and s.owner in ()
11. Materialized view check
select * from dba_mviews;
Final migration
1. Stop the application on the old production:
cd /oracle/crs10g/bin/
./srvctl status nodeapps -n histdb01
./srvctl status nodeapps -n histdb02
./srvctl stop listener -n histdb01
./srvctl stop listener -n histdb02
Change the listener port to 1523 in listener.ora so that applications connecting via the physical IP cannot get in,
then restart the database:
ps -ef|grep "LOCAL=NO"|grep -v grep |awk '{print $2}'|xargs kill -9
2. Stop OGG synchronization on both environments.
After several log switches on the old production, stop the production OGG processes:
alter system switch logfile;
alter system archive log current;
alter system checkpoint;
On the target, confirm all trail files have been applied, then stop OGG.
3. Stop the jobs on the old production:
alter system set job_queue_processes=0 sid='*';
4. Rebuild triggers, sequences and jobs:
expdp "'"/ as sysdba"'" include=trigger directory= dumpfile=trigger.dmp logfile= schemas='<users>'
Drop the triggers in the target database:
select 'drop trigger '|| '"'||owner || '"'||'.'||trigger_name||';' from dba_triggers where owner in ();
Import the triggers on the target:
impdp "'"/ as sysdba"'" include=trigger directory= dumpfile=trigger.dmp logfile=
Rebuild the sequences:
expdp "'"/ as sysdba"'" include=sequence directory= dumpfile=sequence.dmp logfile= schemas='<users>'
select 'drop sequence '|| '"'||sequence_owner || '"'||'.'||sequence_name||';' from dba_sequences where sequence_owner in ();
impdp "'"/ as sysdba"'" include=sequence directory= dumpfile=sequence.dmp logfile=
Rebuild the jobs:
select 'exec dbms_job.remove('||JOB||');' from dba_jobs where log_user in ('<users>');
impdp "'"/ as sysdba"'" directory= dumpfile=job.dmp logfile=..
5. Compare the data.
6. Recompile invalid objects and gather statistics.
1) Compare invalid objects:
select owner,object_name,object_type,status from dba_objects where status<>'VALID' AND owner not in ('SYSMAN','PUBLIC')
AND object_name not in (select object_name from odc.invalid_object_201911@old);
2) Recompile invalid objects:
sql>@?/rdbms/admin/utlrp.sql
3) Gather statistics one last time:
exec dbms_stats.gather_dictionary_stats;
7. Re-enable the jobs on the new database:
alter system set job_queue_processes=16 sid='*';
Rollback measures if the migration fails
Enable reverse synchronization (assuming it was deployed beforehand):
ggsci>alter <extract process>, begin now
ggsci>start <extract process>
ggsci>start <pump process>
Do not start the replicat (apply) process yet.
If a rollback is needed, start the extract and pump on the target and the replicat on the source; once the delta has caught up, stop the OGG processes. If the IP changed, open the database after changing the IP, enable the jobs on the source,
and disable the jobs on the target.
How to guarantee data consistency
1. Oracle GoldenGate Veridata
2. Row count comparison
This method requires creating a comparison table under the system user:
create table odc.row_count_stats
(schemaname varchar2(30 byte),
tablename varchar2(30 byte),
row_cnt_source number,
row_cnt_target number,
cnt_diff number);
Create the DB link to the source database:
create public database link ogglink
connect to odc identified by odc
using '(description =
(address=(protocol=tcp)(host=...)(port=1521))
(connect_data=
(server=dedicated)
(service_name=joe)
)
)';
Create a TABLE_LIST table and insert the users and tables to compare:
create table odc.table_list
(schemaname varchar2(30 byte),
tablename varchar2(30 byte));
Populate the comparison table with the schema and table names.
Use the block below to compare the row counts; a query of the results follows the block:
declare
v_schemaname varchar2(60);
v_tablename varchar2(60);
v_tarcount number(16) := 0;
v_srccount number(16) := 0;
v_sql1 varchar2(2000);
v_sql2 varchar2(2000);
v_sql3 varchar2(2000);
v_cnt_diff number(16) :=0;
cursor cur_tablist is
select schemaname,tablename from odc.table_list;
begin
open cur_tablist;
loop
fetch cur_tablist
into v_schemaname,v_tablename;
exit when cur_tablist%notfound;
v_sql1:='select count(*) from ' ||v_schemaname||'.'||v_tablename||'';
execute immediate v_sql1 into v_tarcount;
v_sql2:='select count(*) from ' ||v_schemaname||'.'||v_tablename||'@ogglink';
execute immediate v_sql2 into v_srccount;
v_cnt_diff:=v_tarcount-v_srccount;
v_sql3:='insert into odc.row_count_stats(schemaname,tablename,row_cnt_source,row_cnt_target,cnt_diff) values('''||upper(v_schemaname)||''','''||v_tablename||''',' ||
v_srccount||','||v_tarcount||','||v_cnt_diff ||')';
execute immediate v_sql3;
end loop;
close cur_tablist;
end;
/
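Afterwards, list any tables whose counts differ:
select * from odc.row_count_stats where cnt_diff<>0;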
When it completes, the row_count_stats table holds the report showing whether differences exist.
3. Using the DBMS_COMPARISON package
This method is slow and cannot be used for LOBs or tables without primary keys.
Veridata is strongly recommended.
How to guarantee performance before and after migration
Database Replay
1) Capture the workload on the production system into capture files
2) Copy the capture files to the test system and preprocess them
3) Replay the production workload on the test system
4) Obtain the replay analysis report
Applicable scenarios:
1) Database or OS upgrades
2) PDB-level or user-level consolidation
3) Configuration changes, such as converting single instance to RAC
4) Storage or network changes
5) Hardware migration
1. Workload capture
1) Restore the test environment:
RMAN DUPLICATE
snapshot standby
Data Pump export/import
If production uses Database Vault, grants on dbms_workload_capture and dbms_workload_replay are required.
So that the test system cannot touch production, neutralize objects such as database links, external tables, directory objects, URLs and e-mails; an isolated dedicated network is strongly recommended.
2) Create the capture directory.
You can run a capture test of a few minutes and extrapolate the space a full capture will need.
mkdir -p /home/oracle/capdir
create or replace directory capdir as '/home/oracle/capdir';
For RAC, a shared filesystem is recommended; separate directories per instance also work, but the files generated on each instance must eventually be merged and shipped to the test environment.
3) Create filters (optional):
begin
dbms_workload_capture.add_filter(
fname=>'filter_user1',
fattribute=>'USER',
fvalue=>'JASON');
END;
/
This filters all sessions of user JASON.
fattribute: the attribute to filter on, one of PROGRAM, MODULE, ACTION, SERVICE, INSTANCE_NUMBER and USER
fvalue: the specific value
Check the filters that were created:
select * from dba_workload_filters;
Delete a filter:
begin
dbms_workload_capture.delete_filter(fname=>'filter_user1');
end;
/
4) Run the capture.
If conditions allow, restart the database before capturing, or capture a whole business cycle.
Some operations cannot be replayed in the test environment, such as SQL*Loader data loads from external files, flashback queries, and non-SQL object access.
1. Start the capture:
begin
dbms_workload_capture.start_capture(
name=>'pri_capture_1',
dir=>'capdir',
default_action=>'EXCLUDE',
duration=>NULL);
END;
/
With default_action=>'EXCLUDE', only the workload matching the filters is captured; with 'INCLUDE', everything except the filtered workload is captured, i.e. all users other than JASON here.
duration=>NULL means no time limit; the capture must be stopped manually.
2. Check the capture status:
select id capture_id,name,directory,status,user_calls,transactions,awr_begin_snap,awr_end_snap from dba_workload_captures;
Simulate client connections executing database operations. Production generates live business data, so do not create test data here.
3. Stop the capture:
begin
dbms_workload_capture.finish_capture;
end;
/
6. Export the AWR data for the capture window:
exec dbms_workload_capture.export_awr(capture_id=>16);
It can also be obtained via awrrpt.sql.
7. Export the capture details:
set pagesize 0 long 30000000 longchunksize 2000 line 1000
spool pri_capture.html
select dbms_workload_capture.report(capture_id=>16,format=>'HTML') from dual;
spool off
2. Initialize the replay
1) Transfer the capture files:
mkdir -p /home/ora19c/replay
create or replace directory db_replay_capture_dir as '/home/ora19c/replay';
Node 1:
scp -r /home/oracle/capfile/* oracle@....:/home/oracle/replay/
Node 2:
scp -r /home/oracle/capfile/inst* oracle@...:/home/oracle/replay/capfile/
2) Process the capture files:
exec dbms_workload_capture.process_capture('db_replay_capture_dir');
Processing creates a folder such as pp19.3.0.0, recording the connection information and execution data.
3. Run the replay
If conditions allow, use a dedicated client machine and copy everything in the target replay directory to that client.
2) Estimate the number of replay clients:
shell>wrc mode=calibrate replaydir=/home/ora19c/replay
3) Initialize the replay data:
begin
dbms_workload_replay.initialize_replay(replay_name=>'replay',replay_dir=>'db_replay_capture_dir');
end;
/
4) Remap connect strings and users.
After initializing, the client connection information recorded in the capture files still points at the original production:
select conn_id,capture_conn,replay_conn from dba_workload_connection_map;
begin
dbms_workload_replay.remap_connection(connection_id=>1,replay_connection=>'192..../ljw');
end;
/
If the target does not have the same replay user as production, the user must be remapped too:
select capture_user,replay_user from dba_workload_user_map;
begin
dbms_workload_replay.set_user_mapping(capture_user=>'PROD',replay_user=>'JASON');end;/
With remapping done, move the target from INITIALIZED to PREPARE state:
begin
dbms_workload_replay.prepare_replay(synchronization=>true);
end;
/
5) Start the replay clients:
scp -r /home/oracle/replay/* oracle@192...:/home/oracle/replay/
wrc jason/oracle@192../ljw mode=replay replaydir=/home/oracle/replay
Once this completes, the clients wait for the replay to begin; errors such as ORA-15552/ORA-155561 at this point are generally caused by incomplete remapping.
6) Run the replay:
begin dbms_workload_replay.start_replay(); end;
/
4. Obtain the analysis reports
1) Export the replay details:
set pagesize 0 long 30000000 longchunksize 2000 line 1000
spool pri_replay.html
select dbms_workload_replay.report(replay_id=>1,format=>'HTML') from dual;
spool off
2) Import the AWR data for the capture:
select dbms_workload_capture.get_capture_info(dir=>'db_replay_capture_dir') capture_id from dual;
select dbms_workload_capture.import_awr(capture_id=>1,staging_schema=>'SYSTEM') from dual;
3) Generate the replay comparison report:
declare
v_rlt clob;
v_replay_id1 number :=1;
v_replay_id2 number:=null;
v_snum number:=1;
v_length number;
v_char varchar2(32767);
begin
dbms_workload_replay.compare_period_report(replay_id1=>v_replay_id1,
replay_id2=>v_replay_id2,format=>'HTML',result=>v_rlt);
v_length:=dbms_lob.getlength(v_rlt);
while (v_snum<v_length) loop
v_char:=dbms_lob.substr(lob_loc=>v_rlt,amount=>32767,offset=>v_snum);
v_snum:=v_snum+32767;
dbms_output.put_line(v_char);
end loop;
end;
/
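The block above prints through dbms_output, so enable server output first (standard SQL*Plus setting):
set serveroutput on size unlimited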
4) Generate the AWR performance comparison report.
Get the capture side's dbid, begin_snap and end_snap:
select awr_dbid,status,awr_begin_snap,awr_end_snap from dba_workload_captures;
Get the replay side's information:
select dbid,status,awr_begin_snap,awr_end_snap from dba_workload_replays;
Generate the comparison report:
select output from table(dbms_workload_repository.awr_diff_report_html(dbid1=>19373648,inst_num1=>1,bid1=>3215,eid1=>3216,
dbid2=>..,inst_num2=>1,bid2=>.,eid2=>3611));
5. Cleanup:
select 'exec dbms_workload_capture.delete_capture_info('||id||')' from dba_workload_captures;
select 'exec dbms_workload_replay.delete_replay_info('||id||')' from dba_workload_replays;
SQL Performance Analyzer (SPA)
Database Replay evaluates the feasibility of the overall workload before and after a migration; SPA evaluates the overall SQL performance of the business.
Workflow
1) Build the target environment
2) Capture the SQL to analyze on production over a business cycle and store it in a SQL tuning set (STS)
3) Pack the captured STS, ship it to the test environment and import it
4) Create the SPA analysis task in the test environment
5) Execute the SQL in the STS to build the pre-change execution data
6) Apply the system change (upgrade, migration, etc.)
7) Re-execute the SQL in the STS to build the post-change execution data
8) Compare and analyze the SQL performance impact and generate the SQL performance report
9) Tune the regressed SQL
10) Repeat steps 6-8 until the SQL performance goal is met
1. Build the test environment
A test environment nearly identical to production was built via RMAN physical synchronization, then upgraded to 19c for validation.
2. Collect the SQL
SQL statements can be imported from AWR, SQL trace files, or existing SQL tuning sets.
1) Create the SPA user:
create user spauser identified by oracle;
grant connect,resource,dba to spauser;
grant advisor to spauser;
grant select any dictionary to spauser;
grant administer sql tuning set to spauser;
2) Create the SQL tuning set:
begin dbms_sqltune.create_sqlset(sqlset_name=>'spa11g',sqlset_owner=>'spauser');end;
select owner,name,statement_count from dba_sqlset;
3) Find the AWR snapshot range for the business cycle:
select min(snap_id) min_id,max(snap_id) max_id from dba_hist_snapshot where end_interval_time between to_date('2020-02-21 00','yyyy-mm-dd hh24') and .....
order by 1;
4) Load the STS. Load the SQL from the AWR window into the tuning set for the overall SQL performance test:
declare
own varchar2(30):='spauser';
bid number:='&begin_snap';
eid number:='&end_snap';
stsname varchar2(30):='spa11g';
stsowner varchar2(30):='spauser';
sts_cur dbms_sqltune.sqlset_cursor;
begin
open sts_cur for
select value(p) from table(dbms_sqltune.select_workload_repository(bid,eid,null,null,null,null,null,1,null,'ALL')) p;
dbms_sqltune.load_sqlset(sqlset_name=>stsname,populate_cursor=>sts_cur,load_option=>'MERGE',sqlset_owner=>stsowner);
end;
/
3. Build and analyze the SQL comparison data
1) Create the SPA analysis task:
SQL>variable t_name varchar2(100);
exec :t_name:=dbms_sqlpa.create_analysis_task(sqlset_name=>'spa11g',task_name=>'spa_task',sqlset_owner=>'spauser');
2) Build the pre-upgrade SQL execution data:
begin
dbms_sqlpa.execute_analysis_task(task_name=>'spa_task',execution_name=>'exec_before_upgrade',execution_type=>'CONVERT SQLSET');
END;
/
Perform the 11g to 19c upgrade.
3) Build the post-upgrade SQL execution data:
begin
dbms_sqlpa.execute_analysis_task(task_name=>'spa_task',execution_name=>'exec_after_upgrade',execution_type=>'test execute');
end;
/
4. Run the SPA comparisons
1) Compare SQL performance before and after the upgrade.
Compare elapsed time:
begin
dbms_sqlpa.execute_analysis_task(task_name=>'spa_task',
execution_type=>'COMPARE PERFORMANCE',
execution_name=>'compare_elapsed_time',
execution_params=>dbms_advisor.arglist
('execution_name1','exec_before_upgrade','execution_name2','exec_after_upgrade','comparison_metric','elapsed_time'));
end;
/
CPU time:
begin
dbms_sqlpa.execute_analysis_task(task_name=>'spa_task',
execution_type=>'COMPARE PERFORMANCE',
execution_name=>'compare_cpu_time',
execution_params=>dbms_advisor.arglist
('execution_name1','exec_before_upgrade','execution_name2','exec_after_upgrade','comparison_metric','cpu_time'));
end;
/
Buffer gets:
begin
dbms_sqlpa.execute_analysis_task(task_name=>'spa_task',
execution_type=>'COMPARE PERFORMANCE',
execution_name=>'compare_buffer_gets_time',
execution_params=>dbms_advisor.arglist
('execution_name1','exec_before_upgrade','execution_name2','exec_after_upgrade','comparison_metric','buffer_gets'));
end;
/
2) Generate the performance analysis report:
alter session set events '31156 trace name context forever,level 0x400';
set long 9999999 longchunksize 100000 linesize 200 head off feedback off echo off
spool xx.html
select dbms_sqlpa.report_analysis_task('spa_task','HTML','TYPICAL','ALL',NULL,100,NULL,NULL,NULL) FROM DUAL;
SPOOL OFF
set trimspool on
set trim on
set pages 0
set long 999999999
set linesize 1000
Compare the SQL execution metrics and generate the comparison reports:
select dbms_sqlpa.report_analysis_task('spa_task','HTML','ALL','ALL',top_sql=>300,execution_name=>'compare_elapsed_time') from dual;
select dbms_sqlpa.report_analysis_task('spa_task','HTML','ALL','ALL',top_sql=>300,execution_name=>'compare_cpu_time') from dual;
select dbms_sqlpa.report_analysis_task('spa_task','HTML','ALL','ALL',top_sql=>300,execution_name=>'compare_buffer_gets_time') from dual;
Fetch the errors:
select dbms_sqlpa.report_analysis_task('spa_task','HTML','errors','summary') from dual;
Fetch unsupported objects:
select dbms_sqlpa.report_analysis_task('spa_task','HTML','unsupported','all') from dual;
Execution plans
alter session set statistics_level=all;
-- run the SQL under test here
SELECT * from table(dbms_xplan.display_cursor(null,null,'advanced allstats last'));
alter session set statistics_level=typical;
View the most recent execution plan of a SQL currently in memory:
select rpad('inst:'||v.inst_id,9)||' ' || rpad('child:'||v.child_number,11) inst_child,t.plan_table_output from gv$sql v,
table(dbms_xplan.display('gv$sql_plan_statistics_all',null,'advanced allstats last -projection -outline -note',
'inst_id=' || v.inst_id ||
' and sql_id='''|| v.sql_id||
''' and child_number=' ||v.child_number)) t
where v.sql_id='&sqlid' and v.loaded_versions>0;
View all execution plans of a SQL currently in memory:
select rpad('inst:'||v.inst_id,9)||' ' || rpad('child:'||v.child_number,11) inst_child,t.plan_table_output from gv$sql v,
table(dbms_xplan.display('gv$sql_plan_statistics_all',null,'advanced allstats last -Projection -Outline -Note',
'inst_id=' || v.inst_id ||
' and sql_id='''|| v.sql_id||
''' and child_number=' ||v.child_number)) t
where v.sql_id='&sqlid' and v.loaded_versions>0 AND v.executions>1;
View the historical execution plans of a given SQL, including plans recorded in AWR snapshots:
select t.plan_table_output from (select distinct sql_id,plan_hash_value,dbid from dba_hist_sql_plan where sql_id='&sql') v,
table(dbms_xplan.display_awr(v.sql_id,v.plan_hash_value,null,'advanced allstats')) t;
Statistics gathering
begin
dbms_stats.gather_table_stats(ownname=>'SCOTT',tabname=>'JASON',estimate_percent=>100,method_opt=>'for all columns size 1',
no_invalidate=>FALSE,cascade=>TRUE);
END;
/
method_opt controls column histogram collection; note the following.
Column cardinality:
select table_name,column_name,num_distinct cardinality,histogram,last_analyzed
from dba_tab_col_statistics where owner='' and table_name='';
Column selectivity:
select /*+ NO_MERGE LEADING(a b) */ b.table_name,a.column_name,a.num_distinct cardinality,b.num_rows,
round(a.num_distinct*100/b.num_rows,1) selectivity from dba_tab_col_statistics a,dba_tables b where
a.owner=b.owner and a.table_name=b.table_name and a.owner='' and a.table_name='';
Prerequisites for creating an index: high cardinality, selectivity above 15%, and the column appears in the WHERE predicates.
Manually flush system monitoring first (col_usage$ is populated from table monitoring; see the flush call further below).
Find the columns that need an index:
select owner,column_name,num_rows,cardinality,selectivity,'need create index on column'||column_name as suggest
from (select /*+ NO_MERGE LEADING(a b) */ b.owner,b.table_name,a.column_name,b.num_rows,a.num_distinct cardinality,
round(a.num_distinct*100/b.num_rows,1) selectivity from dba_tab_col_statistics a,dba_tables b where
a.owner=b.owner and a.table_name=b.table_name and a.owner='&1' and a.table_name='&2') where selectivity>=15
and column_name not in (select column_name from dba_ind_columns where table_owner='&1' and table_name='&2') and column_name in
(select c.name from sys.col_usage$ u,sys.obj$ o,sys.user$ oo,sys.col$ c where o.obj#=u.obj#
and oo.user#=o.owner# and c.obj#=u.obj# and c.intcol#=u.intcol# and oo.name='&1' and o.name='&2');
Stale statistics
In dba_tab_statistics, stale_stats = 'YES' means the statistics are stale and should be regathered.
DML changes are recorded in the dba_tab_modifications view; when DML reaches 10% of the table's rows, the statistics are considered stale.
Get the statistics of the tables referenced by a SQL:
sql>var sql_id varchar2(40);
sql>exec :sql_id:='xxxxxxx';
sql>select /*+ NO_MERGE LEADING(pt s t m) */
s.owner,s.table_name,t.partitioned,s.partition_name,t.degree,t.temporary,s.num_rows,
to_char(s.last_analyzed,'yyyy-mm-dd/hh24:mi:ss') last_analyzed,
s.stattype_locked,s.stale_stats,case when s.num_rows>0 then
to_char(round((m.inserts+m.updates+m.deletes)*100/s.num_rows,1),'99999990D0') end stale_stats_perc
from gv$sql_plan pt,dba_tab_statistics s,dba_tables t,sys.dba_tab_modifications m
where pt.object_type='TABLE' and pt.object_owner=s.owner and pt.object_name =s.table_name and pt.object_type=s.object_type
and pt.sql_id=:sql_id and s.table_name not like 'BIN$%' And s.stale_stats='YES' and s.stattype_locked is null and s.owner=t.owner
and s.table_name=t.table_name and t.owner=m.table_owner(+) and t.table_name=m.table_name(+) and m.partition_name is null
order by s.table_name,s.owner;
Histograms exist precisely for low-cardinality columns with skewed data distribution.
The method_opt parameter:
for all columns: histograms on every column; not recommended
for all indexed columns: histograms on all indexed columns
for all columns size 1: no histograms
for all columns size repeat: collect only on columns that already have histograms
for all columns size auto: Oracle decides which columns appearing in WHERE clauses need histograms
for columns col1,col2,col3... size skewonly: histograms on the named columns
When a query runs sometimes fast and sometimes slow depending on the bind values, a common cause is badly skewed data making the histogram inaccurate.
Query a column's histogram information:
select /*+ NO_MERGE LEADING (a b) */
a.table_name,b.column_name,b.num_rows,a.num_distinct cardinality,round(num_distinct*100/num_rows,1) selectivity,a.histogram,a.num_buckets
from dba_tab_col_statistics a,dba_tables b where a.owner=b.owner and a.owner='' and a.table_name='' and a.table_name=b.table_name order by selectivity desc;
On 11.2.0.4 and later there is no need to disable ACS (adaptive cursor sharing) or bind peeking:
_optimizer_adaptive_cursor_sharing =true
_optimizer_extended_cursor_sharing=udo
_optimizer_extended_cursor_sharing_rel=simple
These are the default values.
Which columns need histograms:
select /*+ NO_MERGE LEADING(a b) */
a.owner,a.table_name,a.column_name,b.num_rows,a.num_distinct,round(num_distinct*100/num_rows,1) selectivity,
'column'||a.column_name||'need gather histogram' notice
from dba_tab_col_statistics a,dba_tables b
where a.owner=b.owner and a.owner='' and a.table_name='' and a.table_name=b.table_name and round(num_distinct*100/num_rows,1)<1
and (a.owner,a.table_name,a.column_name) in
(select oo.name owner,o.name table_name,c.name column_name from sys.col_usage$ u,sys.obj$ o,sys.user$ oo,sys.col$ c
where o.obj#=u.obj# and oo.user#=o.owner# and c.obj#=u.obj# and c.intcol#=u.intcol# and oo.name='' and o.name='') and a.histogram='NONE';
When the initialization parameter statistics_level is set to TYPICAL or ALL, Oracle's table monitoring is enabled by default.
SMON flushes the monitoring info periodically; it can also be flushed manually:
begin dbms_stats.flush_database_monitoring_info;
end;
/
Gather statistics on SCOTT.JASON without histograms:
begin dbms_stats.gather_table_stats(ownname=>'SCOTT',
tabname=>'JASON',method_opt=>'for all columns size 1',
no_invalidate=>false,cascade=>true);
end;
/
Gather statistics on SCOTT.JASON, including a histogram on the OWNER column:
begin dbms_stats.gather_table_stats(ownname=>'SCOTT',
tabname=>'JASON',method_opt=>'for columns owner size skewonly',
no_invalidate=>false,cascade=>true);
end;
/
Prerequisites for an index skip scan:
a composite index
the leading column is absent from the WHERE condition
the leading column has low cardinality
With INDEX SKIP SCAN, watch the leading column's cardinality; a skip scan performs worse than an index range scan.
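A minimal sketch of the scenario (table and index names are hypothetical): with a composite index whose low-cardinality leading column is missing from the WHERE clause, the optimizer may probe each leading-column value with a skip scan:
create index idx_emp2_gender_empno on scott.emp2(gender,empno);
-- gender has very few distinct values; the predicate references only the second column
select /*+ index_ss(e idx_emp2_gender_empno) */ * from scott.emp2 e where e.empno=7788;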
For a full index scan (INDEX FULL SCAN), check whether a TABLE ACCESS BY INDEX ROWID step follows. INDEX FULL SCAN uses single-block reads and the table access by rowid is also single-block reads, which can severely hurt performance; in that case a full table scan is usually preferable.
Fast full index scan (INDEX FAST FULL SCAN): the index contains every column the query needs (no table access required), at least one indexed column has a NOT NULL constraint, and it uses multi-block reads.
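A sketch with the standard SCOTT schema: EMPNO is the primary key (hence NOT NULL), so the count can be answered entirely from the index with multi-block reads:
-- PK_EMP is the primary key index on SCOTT.EMP
select /*+ index_ffs(e pk_emp) */ count(empno) from scott.emp e;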
Join methods:
Nested loops join
In the execution plan, the driving table appears on top and the driven table below.
Example:
select /*+ USE_NL(e d) */ e.first_name,e.last_name,e.salary,d.department_name from hr.employees e,hr.departments d
where d.department_name in ('Marketing','Sales') and e.department_id=d.department_id;
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST'));
To force a nested loops join: /*+ USE_NL(TABLE1,TABLE2) LEADING(TABLE1) */ designates TABLE1 as the driving table.
The driven table's join column needs an index.
Suited to OLTP systems.
The driving table should return few rows.
The join overall should return little data.
Hash join:
select /*+ USE_HASH(o l) */ o.customer_id,l.unit_price*l.quantity from oe.orders o,oe.order_items l where l.order_id=o.order_id;
Suited to equijoins that return large volumes of data; the join columns need no index.
When the PGA SQL work area is small and the build table is large, performance problems are likely.
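One way to spot hash joins spilling out of the PGA is the one-pass/multi-pass counters in v$sql_workarea (the sql_id is a placeholder); a sketch:
select sql_id,operation_type,policy,optimal_executions,onepass_executions,multipasses_executions,last_tempseg_size
from v$sql_workarea where sql_id='xxx' and operation_type like 'HASH%';
Non-zero one-pass or multi-pass counts mean the work area was too small and the join spilled to temp.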
Sort merge join
Useful for non-equijoins between two tables; if the data sources are already sorted, no extra sort step is needed.
1) Each table is sorted on its join column.
2) The sorted row sources are merged in memory.
There is no notion of a driving table.
/*+ USE_MERGE(TABLE1,TABLE2) */
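A sketch of forcing one, using the classic SCOTT non-equijoin that grades salaries against the salgrade ranges:
select /*+ use_merge(e d) */ e.ename,e.sal,d.grade
from scott.emp e,scott.salgrade d
where e.sal between d.losal and d.hisal;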
Query transformation
1. View merging
View merging does not always improve performance; if it causes trouble, disable it:
select /*+ no_merge(v) */ v.employee_id from employees_50_vw v where v.employee_id>150;
With the hint added, the keyword VIEW appears in the execution plan, showing that view merging did not take place.
View merging has prerequisites: the view definition must not contain (among other constructs) UNION, UNION ALL, MINUS, or ROWNUM.
2. Predicate pushing
If view merging does not occur, the optimizer treats the view or subquery as an independent unit and pushes join conditions into it, so as to filter out useless data as early as possible and improve performance.
When the VIEW step's Id column in the plan carries a * and the FILTER entry in the predicate section shows the outer filter condition, the predicate was not pushed into the view.
In general, constant predicate pushing improves performance. Hint: push_pred(alias of the subquery or view).
When pushing predicates, make sure the predicate's data type stays consistent with the join column's type.
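A sketch with the SCOTT schema, forcing the join predicate into a non-merged view (the outer join makes the pushdown non-trivial):
select /*+ no_merge(v) push_pred(v) */ e.ename,v.dname
from scott.emp e,(select deptno,dname from scott.dept) v
where e.deptno=v.deptno(+) and e.empno=7788;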
3. Subquery unnesting
The optimizer converts a nested subquery into an equivalent join, then optimizes the whole statement as a join.
/*+ no_unnest */ disables subquery unnesting
/*+ unnest */ eliminates the FILTER operation
Statements containing EXISTS or NOT EXISTS (among others) readily produce FILTER operations.
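A sketch (SCOTT schema): placing the hint inside the subquery lets the optimizer turn the FILTER into a join:
select e.ename,e.sal from scott.emp e
where exists (select /*+ unnest */ 1 from scott.dept d where d.deptno=e.deptno and d.loc='DALLAS');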
4. Materialized view query rewrite
create materialized view xx_mv enable query rewrite
as select .....
show parameter query_rewrite_enabled -- check whether query rewrite is enabled system-wide
Materialized view query rewrite trades space for performance, so consider it for frequently run, poorly performing queries.
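A minimal end-to-end sketch (table and column names are hypothetical):
create materialized view sales_sum_mv
enable query rewrite
as select prod_id,sum(amount) total_amount from sales group by prod_id;
alter session set query_rewrite_enabled=true;
-- the optimizer may now answer this from the MV: look for MAT_VIEW REWRITE ACCESS in the plan
select prod_id,sum(amount) from sales group by prod_id;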
Locating problem SQL
When host resources spike, find the top OS processes and map the PID back to a session:
select p.inst_id,p.spid,a.sid,a.serial#,a.sql_id,a.event,a.status,a.program,a.machine from gv$session a,gv$process p where
a.inst_id=p.inst_id and a.paddr=p.addr and p.spid='xxxx';
When overall database performance degrades, check for abnormal wait events:
select a.inst_id,a.process,a.sid,a.serial#,a.sql_id,a.event,a.status,a.program,a.machine from gv$session a where event ='db file scattered read';
SQLHC: SQL health check
sqldx.sql only collects information and makes no changes.
sqlhc.sql collects SQL-related data dictionary information.
sqlhcxec.sql executes the SQL and then collects information, so it does produce changes.
SQL>@sqlhc.sql T &sqlid
T = tuning mode, D = diagnostic mode
A SQL profile is a collection of auxiliary information about a query, covering all tables and columns the query references.
1) Run the analysis script:
SQL>@coe_xfr_sql_profile.sql sqlid
2) Enter the desired plan hash value.
3) Check the profile to see what has been fixed:
select name,sql_text,status from dba_sql_profiles;
Drop a profile:
exec dbms_sqltune.drop_sql_profile('xxxx');
Use the SQL tuning advisor tool:
SQL>var tuning_task varchar2(100);
declare
l_sql_id v$session.prev_sql_id%TYPE;
l_tuning_task varchar2(30);
begin
l_sql_id:='&sql_id';
l_tuning_task:=dbms_sqltune.create_tuning_task(sql_id=>l_sql_id);
:tuning_task:=l_tuning_task;
dbms_sqltune.execute_tuning_task(l_tuning_task);
dbms_output.put_line(l_tuning_task);
end;
/
SQL>print tuning_task;
task_8233
View the recommendations:
select dbms_sqltune.report_tuning_task('task_8233') from dual;
Accept the SQL profile:
SQL>exec dbms_sqltune.accept_sql_profile(task_name=>'task_8233',replace=>TRUE,force_match=>true);
SQL Plan Management (SPM)
To capture problem SQL automatically into baselines, set optimizer_capture_sql_plan_baselines to true (the default is false):
alter system set optimizer_capture_sql_plan_baselines=true scope=spfile;
Next, a tuning case that manually loads a SQL execution plan into the SQL plan baseline.
1) Execute the SQL statement.
2) Find the corresponding SQL_ID and plan hash value:
select sql_id,plan_hash_value from v$sql where sql_text like 'xxxx';
3) Load the statement into the SQL plan baseline:
var n number
begin
:n:=dbms_spm.load_plans_from_cursor_cache(sql_id=>'xxx',plan_hash_value=>xxxx,fixed=>'NO',enabled=>'YES');
end;
/
4) Confirm:
select sql_handle,plan_name,enabled,accepted,sql_text from dba_sql_plan_baselines where accepted='YES' order by last_modified;
5) Add a hint to change the execution plan, forcing a full table scan:
select /*+ full(dept) */ * from scott.dept where deptno=10;
Load this plan into SPM the same way as above.
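To pin a preferred plan, the baseline's attributes can also be flipped with dbms_spm.alter_sql_plan_baseline (handle and plan name are placeholders); a sketch:
var n number
exec :n:=dbms_spm.alter_sql_plan_baseline(sql_handle=>'xxx',plan_name=>'xxx',attribute_name=>'fixed',attribute_value=>'YES');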
Drop a plan from the SQL plan baseline:
variable n number;
exec :n:=dbms_spm.drop_sql_plan_baseline(sql_handle=>'xxx',plan_name=>'xxx');
Get execution plans with awrsqrpt.sql or dbms_xplan.
Historical execution plans:
select s.begin_interval_time,s.end_interval_time,q.snap_id,q.dbid,q.sql_id,q.plan_hash_value,q.optimizer_cost,q.optimizer_mode from
dba_hist_sqlstat q,dba_hist_snapshot s where q.sql_id='xxx' and q.snap_id=s.snap_id order by s.snap_id desc;
View the table's statistics:
select to_char(last_analyzed,'yyyy-mm-dd hh24:mi:ss'),num_rows,table_name,stale_stats from dba_tab_statistics where table_name='';
View the histogram:
select column_name,density,num_buckets,histogram from dba_tab_col_statistics where table_name='' and column_name in ('xx');
Remove the histogram on the column (regather it with size 1):
begin
dbms_stats.gather_table_stats(ownname=>'SCOTT',tabname=>'JASON',estimate_percent=>20,method_opt=>'for columns account_no size 1',
no_invalidate=>FALSE,cascade=>TRUE);
END;
/
Force a specific index:
select /*+ index(emp idx_name) */ ......
When a table carries several filter conditions, a single column's selectivity is only a reference; in practice, analyze combinations of columns according to the business workload.
Optimizing updates and deletes on large tables
Slice the table by ROWID, one slice per extent:
select a.file_id,a.extent_id,a.block_id,a.blocks,' rowid between ' || '''' ||
dbms_rowid.rowid_create(1,b.data_object_id,a.relative_fno,a.block_id,0) || ''''|| ' and ' || '''' ||
dbms_rowid.rowid_create(1,b.data_object_id,a.relative_fno,a.block_id+blocks-1,999) || ''';'
from dba_extents a,dba_objects b where a.segment_name=b.object_name and a.owner=b.owner and b.object_name='' and b.owner=''
order by a.relative_fno,a.block_id;
With this information, just plug in the filter condition and delete in batches with an anonymous block:
SQL>declare
cursor cur_rowid is
select dbms_rowid.rowid_create(1,b.data_object_id,a.relative_fno,a.block_id,0) begin_rowid,
dbms_rowid.rowid_create(1,b.data_object_id,a.relative_fno,a.block_id+blocks-1,999) end_rowid
from dba_extents a,dba_objects b where a.segment_name=b.object_name and a.owner=b.owner and b.object_name='' and b.owner=''
order by a.relative_fno,a.block_id;
r_sql varchar2(4000);
begin
for cur in cur_rowid loop
r_sql:='delete scott.jason where object_type=' || '''' || 'INDEX' || '''' || ' and rowid between :1 and :2';
execute immediate r_sql using cur.begin_rowid,cur.end_rowid;
commit;
end loop;
end;
/
This process is not parallel by itself; each slice must be run manually in a separate session window.
11gR2 introduced the DBMS_PARALLEL_EXECUTE package, which runs the chunks in parallel:
SQL>SET SERVEROUTPUT ON
SQL>BEGIN
DBMS_PARALLEL_EXECUTE.DROP_TASK('test_task');
exception when others then
null;
end;
/
SQL>declare
l_task varchar2(30) := 'test_task';
l_sql_stmt varchar2(32767);
l_try number;
l_status number;
begin
dbms_parallel_execute.create_task(task_name=>l_task);
dbms_parallel_execute.create_chunks_by_rowid (
task_name=>l_task,
table_owner=>'JOE',
table_name=>'OB2',
by_row=>true, -- chunk_size counts rows; false would make it count blocks
chunk_size=>2500 -- chunk size: here 2,500 rows per chunk
);
l_sql_stmt:='delete ob2 where object_name=''SYNONYM'' and rowid between
:start_id and :end_id';
dbms_parallel_execute.run_task
(
task_name=>l_task,
sql_stmt=>l_sql_stmt,
language_flag=>dbms_sql.native,
parallel_level=>2 -- degree of parallelism
);
l_try:=0;
l_status:=dbms_parallel_execute.task_status(l_task);
while (l_try<2 and l_status!=dbms_parallel_execute.finished)
loop
l_try:=l_try+1;
dbms_parallel_execute.resume_task(l_task);
l_status:=dbms_parallel_execute.task_status(l_task);
end loop;
dbms_parallel_execute.drop_task(l_task);
exception when others then
dbms_output.put_line('error in the code:'||SQLERRM);
END;
/
The script above chunks by ROWID; alternatively, create_chunks_by_number_col chunks by a numeric column and create_chunks_by_sql by a user-supplied SQL statement (a sketch of the numeric-column variant follows).
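A sketch of the numeric-column variant (task, owner, table, and column names are placeholders):
begin
dbms_parallel_execute.create_chunks_by_number_col(
task_name=>'test_task',
table_owner=>'JOE',
table_name=>'OB2',
table_column=>'OBJECT_ID', -- numeric column whose value ranges define the chunks
chunk_size=>2500);
end;
/
With this variant, the :start_id and :end_id binds in the DML receive column values rather than ROWIDs.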
Besides execute privilege on this package, you also need the CREATE JOB privilege.
Basic workflow:
call CREATE_TASK() to create the task
call CREATE_CHUNKS_BY_ROWID() to define the chunking rule
write the DML statement to execute
call RUN_TASK() to run the task
call DROP_TASK() to drop the task
Related views:
dba_parallel_execute_tasks
dba_parallel_execute_chunks
dba_scheduler_jobs
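Chunk progress and errors can be monitored from the chunk view (task name as in the example above); a sketch:
select status,count(*) from dba_parallel_execute_chunks where task_name='test_task' group by status;
select chunk_id,status,error_message from dba_parallel_execute_chunks
where task_name='test_task' and status='PROCESSED_WITH_ERROR';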
Enable event 10035 tracing of SQL parse failures database-wide:
alter system set events '10035 trace name context forever,level 1';
alter system set events '10035 trace name context off';
Enable SQL trace for an application client session (by SID and SERIAL#):
exec dbms_system.set_sql_trace_in_session(97,2217,true);
exec dbms_system.set_sql_trace_in_session(97,2217,false);