1. Alert Log
1.1. Viewing the alert log through an external table
SQL> create or replace directory bdump as '/u01/app/oracle/admin/GSP/bdump';

SQL> create table alert_logs
     (text varchar2(2000))
     organization external (
       type oracle_loader
       default directory bdump
       access parameters (
         records delimited by newline
         fields
         reject rows with all null fields
       )
       location ('alert_GSP.log')
     )
     reject limit unlimited;

SQL> select * from alert_logs;
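With the external table in place, scanning for errors becomes plain SQL; a minimal sketch (the LIKE pattern is an assumption, adjust it to the errors you care about):

-- Pull only ORA- error lines out of the alert log
select text
  from alert_logs
 where text like '%ORA-%';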
1.2. Archiving the alert log periodically (alert_log_archive.sh)
#!/bin/bash
#*************************************************************
# FileName    : alert_log_archive.sh
#*************************************************************
# Author      : hwb
# CreateDate  : 2019-09-02
# Description : this script archives the alert log every day
#*************************************************************
# source the profile so the Oracle environment variables are set
if [ -f ~/.bash_profile ]; then
  . ~/.bash_profile
fi

date=`date +%Y%m%d`
backup='/backup/alert_log'
alert_log_path="$ORACLE_BASE/diag/rdbms/rfdb/RFDB1/trace"
alert_log_file="alert_$ORACLE_SID.log"
alert_arc_file="$backup/alert_$ORACLE_SID.log.${date}"

cd ${alert_log_path}
if [ ! -e "${alert_log_file}" ]; then
  echo "the alert log does not exist, please check that the file path is correct!"
  exit
fi

if [ -e ${alert_arc_file} ]; then
  echo "the alert log file has already been archived!"
else
  # append today's alert log to the archive copy, then truncate the live log
  cat ${alert_log_file} >> ${alert_arc_file}
  cat /dev/null > ${alert_log_file}
fi
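To run the archiver daily, a cron entry along these lines would do (the 00:30 schedule and the script path are assumptions):

# Hypothetical crontab entry: archive and truncate the alert log every day at 00:30
30 0 * * * /home/oracle/scripts/alert_log_archive.sh > /dev/null 2>&1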
1.3. Monitoring the alert log
The approach here combines Perl with shell, because extracting the error timestamp, line number, and so on is much easier with Perl's string handling than with shell.
-- 1. Script: monitoring_alert_log.pl
#!/usr/bin/perl
#**********************************************************************************
# FileName    : monitoring_alert_log.pl
#**********************************************************************************
# Author      : hwb
# CreateDate  : 2019-09-02
# Description : check the alert log and find out the ORA errors
#**********************************************************************************
use strict;

my ($argv) = @ARGV;
if (@ARGV != 1) {
    print 'Parameter error: you must supply the alert log file as the single input parameter.';
    exit;
}
if (! -e $argv) {
    print '
    Usage:
        monitoring_alert_log.pl
        $ cat alert_[sid].log | monitoring_alert_log.pl
        $ tail -f alert_[sid].log | monitoring_alert_log.pl
        $ monitoring_alert_log.pl alert_[sid].log
    ';
    exit;
}

my $err_regex = '^(\w+ \w+ \d{2} \d{2}:\d{2}:\d{2} \d{4})|(ORA-\d+:.+)$';
my $date = "";
my $line_counter = 0;
while (<>) {
    $line_counter++;
    if (m/$err_regex/oi) {
        # capture group 1 is a timestamp line; remember it for the next ORA- line
        if ($1) {
            $date = $1;
            next;
        }
        # capture group 2 is the ORA- error itself
        print "$line_counter | $date | $2 \n" if ($2);
    }
}
============================================================================================
-- 2. Script: monitoring_alert_log.sh
============================================================================================
#!/bin/bash
#**********************************************************************************
# FileName    : monitoring_alert_log.sh
#**********************************************************************************
# Author      : hwb
# CreateDate  : 2019-09-05
# Description : check the alert log and find out the ORA errors
#**********************************************************************************
if [ -f ~/.bash_profile ]; then
  . ~/.bash_profile
fi

new_log_file="/home/oracle/scripts/new_err_log.txt"
old_log_file="/home/oracle/scripts/old_err_log.txt"
pl_monitoring_alert="/home/oracle/scripts/monitoring_alert_log.pl"
email_content="/home/oracle/scripts/sendmail.txt"
alert_logfile="/u01/app/oracle/diag/rdbms/rfdb/RFDB1/trace/alert_RFDB1.log"

# rotate the error list from the previous run
rm -f ${old_log_file}
mv ${new_log_file} ${old_log_file}
#rm -f ${pl_sendmail}

# run the perl script and collect any ORA- errors
perl ${pl_monitoring_alert} ${alert_logfile} > ${new_log_file}

# if the alert log contains no errors, exit
if [[ -e "${new_log_file}" && ! -s "${new_log_file}" ]]; then
  exit
fi

# only alert when new errors have appeared since the previous run
new_err_num=`cat ${new_log_file} | wc -l`
old_err_num=`cat ${old_log_file} | wc -l`
if [ ${new_err_num} -le ${old_err_num} ]; then
  exit
fi

date_today=`date +%Y_%m_%d`
toemail="huangwb@fslgz.com"
subject="Monitoring the Oracle alert logs found ORA errors"
content="The instance ${ORACLE_SID}'s alert log reported ORA errors; please see the details in the attachment and take action. Many thanks!
The errors are below: ${new_log_file}"

#echo 'Content-Type: text/html' > ${email_content}
#echo 'To: huangwb@fslgz.com' >> ${email_content}
#echo ${subject} >> ${email_content}
#echo '<pre style="font-family: courier; font-size: 9pt">' >> ${email_content}
#echo ${content} >> ${email_content}
#cat ${new_log_file} >> ${email_content} 2>&1
#echo 'Oracle Alert Services' >> ${email_content}
#/usr/sbin/sendmail -t -f ${subject} < ${email_content}

echo ${content} > t.txt
mail -s "${subject}" $toemail < t.txt
rm -f ${email_content}

#( ${content} ; uuencode ${new_log_file} ${reportname} ) | /bin/mail -s ${subject} "huangwb@fslgz.com"
# uuencode <attachment file> <attachment alias (the file name shown in the mail client)>
#
# Files generated on Linux default to UTF-8 encoding; when such a .csv file is opened in Excel
# on Windows (default encoding GB2312), Chinese characters come out garbled.
# Workaround: convert the text to GB2312 before writing the attachment file.
# Sending attachments with sendmail requires the uuencode command from the sharutils package;
# install it with "yum install sharutils" or "rpm -ivh sharutils-4.7-6.1.el6.x86_64.rpm".
#(${content};uuencode ${new_log_file} ${reportname})|/bin/mail -s ${subject} huangwb@fslgz.com
============================================================================================
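To make this a real monitor rather than a one-off check, it can be scheduled via cron; a minimal sketch (the 10-minute interval and the script path are assumptions):

# Hypothetical crontab entry: check the alert log for new ORA- errors every 10 minutes
*/10 * * * * /home/oracle/scripts/monitoring_alert_log.sh > /dev/null 2>&1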
2. Trace Files
2.1. Finding the current session's trace file
select c.value || '/' || d.instance_name || '_ora_' || a.spid || '.trc' trace
  from v$process a, v$session b, v$parameter c, v$instance d
 where a.addr = b.paddr
   and b.sid = sys_context('userenv', 'sid')
   and c.name = 'user_dump_dest';
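Note the file only appears once the session has actually written trace. A minimal sketch of turning SQL trace on and off for the current session (tracefile_identifier is optional and just tags the trace file name):

-- Tag the trace file name (optional), then enable and later disable SQL trace
alter session set tracefile_identifier = 'MYTRACE';
alter session set sql_trace = true;
-- ... run the statements to be traced ...
alter session set sql_trace = false;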
2.2. Using the v$diag_info view to get dump file names and locations
11g introduced V$DIAG_INFO, a view that summarizes diagnostic file locations far more conveniently than the traditional gettrcname.sql script.
-- Querying v$diag_info makes it easy to find the trace file of your own server process;
-- for other processes, query the tracefile column newly added to v$process.
select name, value from v$diag_info;
select spid, tracefile from v$process;
3. Redo Logs
3.1. Checking the average size of the online redo logs
select avg(bytes)/1024/1024 avg_mb,
       count(1) cnt,
       max(bytes) max_bytes,
       min(bytes) min_bytes
  from v$log;
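Beyond the average, a per-group breakdown is often what you actually want; a small companion query against the same view:

-- Size, member count and status of each online redo log group
select group#, thread#, bytes/1024/1024 size_mb, members, status, archived
  from v$log
 order by group#;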
3.2. Adding redo log groups
-- Create 3 new log groups
ALTER DATABASE ADD LOGFILE GROUP 4 ('/u01/app/oracle/oradata/orcl/redo04a.log','/u01/app/oracle/oradata/orcl/redo04b.log') SIZE 80M;
ALTER DATABASE ADD LOGFILE GROUP 5 ('/u01/app/oracle/oradata/orcl/redo05a.log','/u01/app/oracle/oradata/orcl/redo05b.log') SIZE 80M;
ALTER DATABASE ADD LOGFILE GROUP 6 ('/u01/app/oracle/oradata/orcl/redo06a.log','/u01/app/oracle/oradata/orcl/redo06b.log') SIZE 80M;

-- Switch the current log into the new groups
alter system switch logfile;
alter system switch logfile;
alter system switch logfile;

-- Drop the old log groups
alter database drop logfile group 1;
alter database drop logfile group 2;
alter database drop logfile group 3;

-- At the OS level, remove the files of the original groups 1, 2 and 3
cd $ORACLE_BASE/oradata/<sid>
rm -f redo01.log redo02.log redo03.log

-- Recreate groups 1, 2 and 3
ALTER DATABASE ADD LOGFILE GROUP 1 ('/u01/app/oracle/oradata/orcl/redo01a.log','/u01/app/oracle/oradata/orcl/redo01b.log') SIZE 80M;
ALTER DATABASE ADD LOGFILE GROUP 2 ('/u01/app/oracle/oradata/orcl/redo02a.log','/u01/app/oracle/oradata/orcl/redo02b.log') SIZE 80M;
ALTER DATABASE ADD LOGFILE GROUP 3 ('/u01/app/oracle/oradata/orcl/redo03a.log','/u01/app/oracle/oradata/orcl/redo03b.log') SIZE 80M;

-- Switch logs again
alter system switch logfile;
alter system switch logfile;
alter system switch logfile;

-- Drop the interim groups 4, 5 and 6
alter database drop logfile group 4;
alter database drop logfile group 5;
alter database drop logfile group 6;

-- At the OS level, remove the files of groups 4, 5 and 6

-- Back up the latest control file
alter database backup controlfile to trace as '/home/oracle/ctl';
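One safety check the sequence above leaves implicit: a log group can only be dropped while its status is INACTIVE; dropping a CURRENT or ACTIVE group fails (ORA-01623 for the current group). A quick pre-check before each drop:

-- Groups can be dropped only when INACTIVE; verify before each drop
select group#, thread#, status from v$log;

-- If the target group is still ACTIVE, force a checkpoint and re-check
alter system checkpoint;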
3.3. Checking hourly log switches over the last few days
For example, if each redo log is 512 MB and 129 logs were archived during the 1 a.m. hour, that hour generated roughly 129 * 512 / 1024 ≈ 64.5 GB of redo, i.e. about 1 GB per minute. (Note: archived volume != redo volume; the actual archives for that hour might total only about 20 GB.)
SELECT SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'), 1, 5) day,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '00', 1, 0)) h00,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '01', 1, 0)) h01,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '02', 1, 0)) h02,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '03', 1, 0)) h03,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '04', 1, 0)) h04,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '05', 1, 0)) h05,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '06', 1, 0)) h06,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '07', 1, 0)) h07,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '08', 1, 0)) h08,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '09', 1, 0)) h09,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '10', 1, 0)) h10,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '11', 1, 0)) h11,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '12', 1, 0)) h12,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '13', 1, 0)) h13,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '14', 1, 0)) h14,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '15', 1, 0)) h15,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '16', 1, 0)) h16,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '17', 1, 0)) h17,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '18', 1, 0)) h18,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '19', 1, 0)) h19,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '20', 1, 0)) h20,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '21', 1, 0)) h21,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '22', 1, 0)) h22,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'), 10, 2), '23', 1, 0)) h23,
       COUNT(*) total
  FROM v$log_history a
 WHERE first_time >= sysdate - 10
 GROUP BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'), 1, 5)
 ORDER BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'), 1, 5) DESC;
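Since switch counts only approximate volume (see the note above), a hedged companion query that sums the actual archived size per hour from v$archived_log:

-- Archived volume per hour over the last 10 days (MB)
select to_char(first_time, 'YYYY-MM-DD HH24') arc_hour,
       count(*) switches,
       round(sum(blocks * block_size) / 1024 / 1024, 2) arc_mb
  from v$archived_log
 where first_time >= sysdate - 10
 group by to_char(first_time, 'YYYY-MM-DD HH24')
 order by 1 desc;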
3.4. Finding the SQL statements behind a redo log surge
-- 1. Find the segments with the most block changes over the last 2 hours
select *
  from (select to_char(begin_interval_time, 'YYYY_MM_DD HH24:MI') snap_time,
               dhsso.object_name,
               sum(db_block_changes_delta)
          from dba_hist_seg_stat dhss,
               dba_hist_seg_stat_obj dhsso,
               dba_hist_snapshot dhs
         where dhs.snap_id = dhss.snap_id
           and dhs.instance_number = dhss.instance_number
           and dhss.obj# = dhsso.obj#
           and dhss.dataobj# = dhsso.dataobj#
           and begin_interval_time > sysdate - 120 / 1440
         group by to_char(begin_interval_time, 'YYYY_MM_DD HH24:MI'),
                  dhsso.object_name
         order by 3 desc)
 where rownum <= 5;

-- 2. From the AWR views, find the SQL touching the top objects from step 1
--    (MON_MODS$ is used as the example object name here)
SELECT to_char(begin_interval_time, 'YYYY_MM_DD HH24:MI'),
       dbms_lob.substr(sql_text, 4000, 1),
       dhss.instance_number,
       dhss.sql_id,
       executions_delta,
       rows_processed_delta
  FROM dba_hist_sqlstat dhss, dba_hist_snapshot dhs, dba_hist_sqltext dhst
 WHERE upper(dhst.sql_text) LIKE '%MON_MODS$%'
   AND dhss.snap_id = dhs.snap_id
   AND dhss.instance_number = dhs.instance_number
   AND dhss.sql_id = dhst.sql_id;

-- 3. From the ASH views, find the session, module and machine that ran those SQL statements
select * from dba_hist_active_sess_history WHERE sql_id = '350f5yrnnmshs';
select * from v$active_session_history where sql_id = '350f5yrnnmshs';

-- 4. Check dba_source for stored procedures containing this SQL
select * from dba_source where upper(text) like '%MON_MODS$%';
3.5. Scripts to trace the sessions generating heavy redo
SQL: How to Find Sessions Generating Lots of Redo or Archive logs (Doc ID 167492.1)
-- 1. Use the block_changes value to spot the sessions generating heavy redo
SELECT s.sid, s.serial#, s.username, s.program, i.block_changes
  FROM v$session s, v$sess_io i
 WHERE s.sid = i.sid
 ORDER BY 5 desc, 1, 2, 3, 4;

-- 2. Use the USED_UBLK and USED_UREC values to spot the transactions generating heavy redo
SELECT s.sid, s.serial#, s.username, s.program, t.used_ublk, t.used_urec
  FROM v$session s, v$transaction t
 WHERE s.taddr = t.addr
 ORDER BY 5 desc, 6 desc, 1, 2, 3, 4;
4. Archived Logs
4.1. Checking the daily archived log volume
Note that the actual size of the archived logs is less than the redo volume, because archiving compacts the log contents.
alter session set nls_date_format='yyyy.mm.dd hh24:mi:ss';

select trunc(completion_time) as arc_date,
       count(*) as cnt,
       round(sum(blocks * block_size) / 1024 / 1024, 2) as arc_mb
  from v$archived_log
 group by trunc(completion_time)
 order by trunc(completion_time);
4.2. Finding the SQL with heavy DML in a given time window (recommended)
set line 200
col sql_text1 format a40
col module format a30

select b.*
  from (select t.sql_id,
               t.module,
               (select to_char(dbms_lob.substr(sql_text, 20, 1))
                  from sys.wrh$_sqltext
                 where sql_id = t.sql_id
                   and rownum <= 1) sql_text1,
               to_char(s.begin_interval_time, 'yyyy-mm-dd hh24') begin_interval_time1,
               to_char(t.rows_processed_delta, '9999,9999,9999') rows_processed_delta,
               round((ratio_to_report(rows_processed_delta) over ()) * 100, 2) rtr_row,
               t.executions_delta,
               to_char(t.rows_processed_delta / (t.executions_delta + 1), '9999,999999') row_exec
          from sys.wrh$_sqlstat t, wrm$_snapshot s
         where t.dbid = s.dbid
           and t.snap_id = s.snap_id
           and s.begin_interval_time >= to_date(to_char(trunc(sysdate), 'yyyy-mm-dd') || ' 00:00:00', 'yyyy-mm-dd hh24:mi:ss')
           and s.begin_interval_time <= to_date(to_char(trunc(sysdate), 'yyyy-mm-dd') || ' 08:00:00', 'yyyy-mm-dd hh24:mi:ss')
         order by t.snap_id desc) b
 where (upper(sql_text1) like '%INSERT%'
     or upper(sql_text1) like '%DELETE%'
     or upper(sql_text1) like '%UPDATE%')
 order by rows_processed_delta;
4.3. Automatically deleting expired archived logs on Windows Server
The approach for automatically deleting expired archived logs is as follows:
1) Write a .bat script that can run unattended;
2) Set up a recurring scheduled task in Windows.
-- 1. Script: E:\HRDB_BACKUP\arch\clear_arch.bat
rman target / msglog=E:\HRDB_BACKUP\arch\log\%date:~0,10%.log cmdfile=E:\HRDB_BACKUP\arch\clear_arch.txt

-- 2. Script: E:\HRDB_BACKUP\arch\clear_arch.txt
CROSSCHECK ARCHIVELOG ALL;
DELETE NOPROMPT ARCHIVELOG ALL COMPLETED BEFORE 'SYSDATE-3';
BACKUP VALIDATE ARCHIVELOG ALL;
EXIT;

-- 3. Set up the Windows scheduled task
Run taskschd.msc from cmd to open Task Scheduler, or open Control Panel,
find "Administrative Tools", double-click "Task Scheduler", and create the scheduled task there.
4.4. Automatically deleting expired archived logs on Linux (del_rf_arch.sh)

-- 1. Cron job
30 00 * * * /home/oracle/scripts/del_rf_arch.sh

-- 2. Script
[oracle@RFDB1 scripts]$ cat del_rf_arch.sh
#!/bin/bash
# this is an rman auto-delete-archivelog script
# Copyright by hwb
# date 2019-04-15
if [ -f ~/.bash_profile ]; then
  . ~/.bash_profile
fi
backtime=$(date +%Y%m%d%H%M)
# delete archived logs older than 7 days
rman target / nocatalog msglog /backup/log/delarch_$backtime.log <<EOF
crosscheck archivelog all;
delete force noprompt archivelog all completed before 'sysdate-7';
exit;
EOF
5. Listener Log
5.1. Manually cleaning the listener log
Official guidance is to keep the listener log under 4 GB; once it reaches about 4 GB, new database connections become very slow, typically taking a minute or more.
-- 1. Stop logging
LSNRCTL> set log_status off

-- 2. In the directory /u01/app/grid/diag/tnslsnr/RFDB1/listener/trace
mv listener.log listener.log.bak

-- 3. Re-enable logging
LSNRCTL> set log_status on

-- 4. Reload the listener
LSNRCTL> reload

-- 5. A new log is created in the listener directory; compress and keep the old one
tar zcvf listener-20131121.log.tar.gz listener.log.bak

-- 6. Remove the original log
rm listener.log.bak
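On 11g and later the listener writes into the ADR, so ADRCI purging is an alternative to the manual rename; a sketch, assuming the listener's ADR home as reported by "show homes" (the home path and 30-day retention are assumptions):

# List ADR homes, pick the listener's, then purge trace files older than 30 days (age is in minutes)
adrci exec="show homes"
adrci exec="set home diag/tnslsnr/RFDB1/listener; purge -age 43200 -type trace"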
6. Script to automatically clean Oracle archived logs, alert log, listener log, and audit logs
#!/bin/bash
# crontab: 30 8 * * * /ora/u01/scripts/clean_log.sh > /dev/null 2>&1
. /home/oracle/.bash_profile

####################################### archived logs
# --------------------
# about 400 GB of archived logs every day
# --------------------
export ORACLE_SID=ORCL
$ORACLE_HOME/bin/rman log=/oracle/archlog/ORCL/clean.log <<EOF
connect target /
run {
  crosscheck archivelog all;
  delete noprompt expired archivelog all;
  delete noprompt archivelog all completed before 'sysdate - 3';
}
exit;
EOF

####################################### alert log
# ---------------------
# the alert log grows about 30 MB every month
# ---------------------
cd ${ORACLE_BASE}/diag/rdbms/ORCL/$ORACLE_SID/trace
export SIZE=`du -m alert_${ORACLE_SID}.log | cut -f1`
if [ ${SIZE} -ge 30 ]; then
  cp alert_${ORACLE_SID}.log /oracle/rmanbackup/oracle_log/alert/alert_${ORACLE_SID}_`date +%Y%m%d%H%M%S`.log
  # cat /dev/null > alert_${ORACLE_SID}.log
  truncate -s 0 alert_${ORACLE_SID}.log
fi
find . -mtime +30 -name "*.trc" | xargs rm -rf
find . -mtime +30 -name "*.trm" | xargs rm -rf

####################################### listener log
# ---------------------
# listener.log grows about 100 MB every 8 months; the listener xml logs about 200 MB every month
# ---------------------
cd $ORACLE_BASE/diag/tnslsnr/`hostname`/listener/trace
export SIZE=`du -m listener.log | cut -f1`
if [ $SIZE -ge 100 ]; then
  # lsnrctl set log_status off
  # mv listener.log /oracle/rmanbackup/oracle_log/listener/listener_`date +%Y%m%d%H%M%S`.log
  cp listener.log /oracle/rmanbackup/oracle_log/listener/listener_`date +%Y%m%d%H%M%S`.log
  truncate -s 0 listener.log
  # lsnrctl set log_status on
fi
cd $ORACLE_BASE/diag/tnslsnr/`hostname`/listener_orcl/alert
find . -mtime +30 -name "log_*.xml" | xargs rm -rf

####################################### audit logs
# ---------------------
# about 2 GB of audit files every month; note that the aud$ table lives in the SYSTEM tablespace
# ---------------------
cd $ORACLE_HOME/rdbms/audit
find . -mtime +30 -exec mv {} /oracle/rmanbackup/oracle_log/audit \;

exit
7. LogMiner Log Mining
Oracle LogMiner is a very useful analysis tool that Oracle has shipped since 8i. It makes it easy to see the exact contents of Oracle online and archived redo log files; in particular, it can reconstruct every DML and DDL statement executed against the database, which makes it well suited to debugging, auditing, or rolling back a specific transaction.
LogMiner is actually a set of PL/SQL packages plus some dynamic views, shipped as part of the Oracle database and completely free. Compared with other Oracle built-in tools, though, it is somewhat awkward to use, mainly because it provides no graphical user interface (GUI).
The redo log files contain all the data needed for database recovery and record every change made to the database, i.e. every DML statement executed against it. LogMiner can analyze both online and archived (offline) redo log files, and it can mine the redo logs of its own database as well as those of another database.
Overall, the main uses of LogMiner are:
1) Tracking database changes: changes can be tracked offline without affecting the performance of the live system.
2) Undoing database changes: specific changes can be rolled back, reducing the need for point-in-time recovery.
3) Tuning and capacity planning: data growth patterns can be analyzed from the data in the log files.
7.1. Installing LogMiner
$ORACLE_HOME/rdbms/admin/dbmslm.sql: DBMS_LOGMNR
$ORACLE_HOME/rdbms/admin/dbmslmd.sql: DBMS_LOGMNR_D
$ORACLE_HOME/rdbms/admin/dbmslms.sql: session management
-- Install LogMiner
SQL> @$ORACLE_HOME/rdbms/admin/dbmslm.sql
SQL> @$ORACLE_HOME/rdbms/admin/dbmslmd.sql
SQL> @$ORACLE_HOME/rdbms/admin/dbmslms.sql

-- Views
select * from v$logmnr_dictionary;
select * from v$logmnr_logs;
select * from v$logmnr_contents;
7.2. Analyzing DML using the source database's data dictionary (online catalog)
-- 1. Enable supplemental logging
alter database add supplemental log data;
select supplemental_log_data_min from v$database;

-- 2. Build the list of logs to analyze
-- first file:
execute dbms_logmnr.add_logfile(logfilename => '<log file>', options => dbms_logmnr.new);
-- subsequent files:
execute dbms_logmnr.add_logfile(logfilename => '<log file>', options => dbms_logmnr.addfile);
-- equivalent positional form:
-- execute dbms_logmnr.add_logfile('<log file>', dbms_logmnr.addfile);
-- remove a file:
execute dbms_logmnr.remove_logfile(logfilename => '<log file>');

-- 3. Start the analysis
execute dbms_logmnr.start_logmnr(options => dbms_logmnr.dict_from_online_catalog);
-- bounded by SCN:
-- execute dbms_logmnr.start_logmnr(options => dbms_logmnr.dict_from_online_catalog, startscn => 123, endscn => 124);
-- bounded by time:
-- exec dbms_logmnr.start_logmnr(options => dbms_logmnr.dict_from_online_catalog, starttime => to_date('2019-02-02 00:00:00','YYYY-MM-DD HH24:MI:SS'), endtime => to_date('2019-02-03 00:00:00','YYYY-MM-DD HH24:MI:SS'));

-- 4. Query the analysis results
select username, scn, timestamp, sql_redo, sql_undo from v$logmnr_contents;

-- 5. End the analysis
execute dbms_logmnr.end_logmnr;
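The '<log file>' placeholders above need real paths; one way to list candidates is to query the online log members and the recent archives:

-- Candidate online redo log members
select group#, member from v$logfile order by group#;

-- Candidate archived logs from the last day
select name, first_time, next_time
  from v$archived_log
 where first_time > sysdate - 1
 order by first_time;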
7.3. Analyzing DDL using a LogMiner dictionary extracted to a flat file
-- 1. Extract the LogMiner dictionary
-- 1) Set a directory for the dictionary file (requires a DB restart):
show parameter utl_file_dir;
alter system set utl_file_dir='/oracle' scope=spfile;
-- 2) Create the dictionary file:
exec dbms_logmnr_d.build('dict.ora', '/oracle');

-- 2. Build the list of logs to analyze
-- first file:
execute dbms_logmnr.add_logfile(logfilename => '<log file>', options => dbms_logmnr.new);
-- subsequent files:
execute dbms_logmnr.add_logfile(logfilename => '<log file>', options => dbms_logmnr.addfile);
-- equivalent positional form:
-- execute dbms_logmnr.add_logfile('<log file>', dbms_logmnr.addfile);
-- remove a file:
execute dbms_logmnr.remove_logfile(logfilename => '<log file>');

-- 3. Start the analysis
-- unbounded:
exec dbms_logmnr.start_logmnr(dictfilename => '/oracle/dict.ora');
-- bounded by SCN:
-- exec dbms_logmnr.start_logmnr(dictfilename => '/oracle/dict.ora', startscn => 123, endscn => 124);
-- bounded by time:
-- exec dbms_logmnr.start_logmnr(dictfilename => '/oracle/dict.ora', starttime => to_date('2019-02-02 00:00:00','YYYY-MM-DD HH24:MI:SS'), endtime => to_date('2019-02-03 00:00:00','YYYY-MM-DD HH24:MI:SS'));

-- 4. Query the analysis results
select username, scn, timestamp, sql_redo, sql_undo from v$logmnr_contents;

-- 5. End the analysis
execute dbms_logmnr.end_logmnr;