
Building an Oracle 12.2.0.1 RAC on RHEL 6.5

Leo 2025-03-09

Topic: building an Oracle 12.2.0.1 RAC on RHEL 6.5.

1. Overall Planning

1.1 Cluster Planning

1.2 Storage Planning

Note: the Oracle 12c RAC minimum disk-size requirements are as follows.

With "External" redundancy, at least 38,860 MB;

with "Normal", at least 77,636 MB;

with "High", at least 116,400 MB.

1.3 Software Packages

2. Network Configuration

Add one additional NIC to each of the two hosts for the cluster heartbeat (private interconnect) network.

Configure the heartbeat network as follows.

[root@cbdps01 ~]# cd /etc/sysconfig/network-scripts

[root@cbdps01 network-scripts]# cp ifcfg-eth0 ifcfg-eth1

[root@cbdps01 network-scripts]# vi ifcfg-eth1

DEVICE=eth1

HWADDR=00:0C:29:2B:EF:3F

TYPE=Ethernet

UUID=920c94a4-a019-4637-b9c4-9f7de148b3a7

IPADDR=192.168.133.170

ONBOOT=yes

NM_CONTROLLED=yes

BOOTPROTO=static

PREFIX=24

GATEWAY=192.168.133.2

DNS1=192.168.133.2

 

[root@cbdps01 network-scripts]# cp ifcfg-eth1 ifcfg-eth2

[root@cbdps01 network-scripts]# vi ifcfg-eth2

DEVICE=eth2

HWADDR=00:0C:29:2B:EF:49

TYPE=Ethernet

UUID=9e90e8ac-797e-4488-bb29-77cfad71832b

IPADDR=192.168.78.170

ONBOOT=yes

NM_CONTROLLED=yes

BOOTPROTO=static

PREFIX=24

 

Note: DEVICE, HWADDR, UUID, and IPADDR are the fields updated from the copied file; GATEWAY and DNS1 are removed for the private NIC.

 

[root@cbdps01 ~]# service network restart

[root@cbdps01 network-scripts]# ifconfig

eth1      Link encap:Ethernet  HWaddr 00:0C:29:2B:EF:3F 

          inet addr:192.168.133.170  Bcast:192.168.133.255  Mask:255.255.255.0

          inet6 addr: fe80::20c:29ff:fe2b:ef3f/64 Scope:Link

          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1

          RX packets:708 errors:0 dropped:0 overruns:0 frame:0

          TX packets:480 errors:0 dropped:0 overruns:0 carrier:0

          collisions:0 txqueuelen:1000

          RX bytes:87988 (85.9 KiB)  TX bytes:96441 (94.1 KiB)

 

eth2      Link encap:Ethernet  HWaddr 00:0C:29:2B:EF:49 

          inet addr:192.168.78.170  Bcast:192.168.78.255  Mask:255.255.255.0

          inet6 addr: fe80::20c:29ff:fe2b:ef49/64 Scope:Link

          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1

          RX packets:42 errors:0 dropped:0 overruns:0 frame:0

          TX packets:12 errors:0 dropped:0 overruns:0 carrier:0

          collisions:0 txqueuelen:1000

          RX bytes:4319 (4.2 KiB)  TX bytes:1390 (1.3 KiB)

 

lo        Link encap:Local Loopback 

          inet addr:127.0.0.1  Mask:255.0.0.0

          inet6 addr: ::1/128 Scope:Host

          UP LOOPBACK RUNNING  MTU:16436  Metric:1

          RX packets:4 errors:0 dropped:0 overruns:0 frame:0

          TX packets:4 errors:0 dropped:0 overruns:0 carrier:0

          collisions:0 txqueuelen:0

          RX bytes:240 (240.0 b)  TX bytes:240 (240.0 b)

 

[root@cbdps01 network-scripts]# nmcli con list

NAME                      UUID                                   TYPE              SCOPE    TIMESTAMP-REAL                   

System eth1               920c94a4-a019-4637-b9c4-9f7de148b3a7   802-3-ethernet    system   Sun 02 Feb 2025 04:13:40 PM CST  

System eth2               9e90e8ac-797e-4488-bb29-77cfad71832b   802-3-ethernet    system   Sun 02 Feb 2025 04:13:41 PM CST  

Auto eth2                 c17e4157-825f-4065-ad1e-fda97fe34cff   802-3-ethernet    system   Sun 02 Feb 2025 04:10:52 PM CST  

System eth0               540d1f90-f0d3-4cba-a753-a90762d39a0b   802-3-ethernet    system   never                            


 

3. Pre-installation Setup

Note: unless stated otherwise, steps 3.2 through 3.17 must be executed on both nodes.

3.1 Change the Hostname on Node 2

[root@cbdps01 ~]# cat /etc/*release

LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch

Red Hat Enterprise Linux Server release 6.5 (Santiago)

Red Hat Enterprise Linux Server release 6.5 (Santiago)

 

[root@cbdps01 ~]# vi /etc/sysconfig/network

Change HOSTNAME to cbdps02.
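
After the change, /etc/sysconfig/network on node 2 should contain at least the following (illustrative minimal content):

NETWORKING=yes
HOSTNAME=cbdps02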

 

3.2 Mount the Installation ISO

# mount /dev/sr0 /mnt

mount: /dev/sr0 is write-protected, mounting read-only

 

3.3 Yum Repository

[root@cbdps01 ~]# df -h

Filesystem      Size  Used Avail Use% Mounted on

/dev/sda3        90G  2.9G   83G   4% /

tmpfs           3.9G   72K  3.9G   1% /dev/shm

/dev/sda1      1008M   62M  896M   7% /boot

/dev/sr0        3.6G  3.6G     0 100% /mnt

 

# cd /etc/yum.repos.d

# mkdir bak

# mv *.repo bak

# cat <<EOF>>/etc/yum.repos.d/local.repo

[local]

name=local

baseurl=file:///mnt

gpgcheck=0

enabled=1

EOF

 

# yum makecache

 

3.4 Dependency Packages

# yum install -y binutils \

compat-libcap1 \

compat-libstdc++-33 \

e2fsprogs \

e2fsprogs-libs \

glibc \

glibc-devel \

ksh \

libaio-devel \

libaio \

libgcc \

libstdc++ \

libstdc++-devel \

libxcb \

libX11 \

libXau \

libXi \

libXtst \

make \

net-tools \

nfs-utils \

smartmontools \

sysstat \

iotop \

gcc \

gcc-c++ --skip-broken

 

Verify the packages:

# rpm -q binutils \

compat-libcap1 \

compat-libstdc++-33 \

e2fsprogs \

e2fsprogs-libs \

glibc \

glibc-devel \

ksh \

libaio-devel \

libaio \

libgcc \

libstdc++ \

libstdc++-devel \

libxcb \

libX11 \

libXau \

libXi \

libXtst \

make \

net-tools \

nfs-utils \

smartmontools \

sysstat \

iotop \

gcc \

gcc-c++ | grep "not installed"

 

3.5 Edit the hosts File

Modify it as follows.

[root@cbdps01 ~]# cat /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

#Public IP

192.168.133.170 cbdps01

192.168.133.171 cbdps02

 

#Private IP

192.168.78.170 cbdps01-priv

192.168.78.171 cbdps02-priv

 

#Virtual IP

192.168.133.172 cbdps01-vip

192.168.133.173 cbdps02-vip

 

#Scan IP

192.168.133.174 cbdps-scan

 

Note: at this stage the public and private addresses should answer ping, while the two VIPs and the SCAN address should not respond yet — that is the expected state.
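
A quick verification from node 1 (hostnames per the hosts file above; the last command is expected to fail at this stage):

ping -c 2 cbdps02
ping -c 2 cbdps02-priv
ping -c 2 cbdps02-vip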

 

3.6 Firewall

# service iptables stop

# chkconfig iptables off

# service iptables status

# chkconfig --list iptables

iptables        0:off   1:off   2:off   3:off   4:off   5:off   6:off

 

3.7 SELinux

# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

 

Note: a reboot is required for this to take effect.

 

3.8 Time Synchronization

Starting with Oracle 11gR2 RAC, the Cluster Time Synchronization Service (CTSS) synchronizes time across the nodes. Here the NTP and chrony services are disabled; Oracle then starts the ctssd process in active mode automatically.

 

Check the system time on each node and confirm that the time and time zone are correct:

date

 

-- Stop the ntp service and move its config file aside (CTSS will be used instead)

service ntpd status

mv /etc/ntp.conf /etc/ntp.conf_bak
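
If ntpd is running, also stop it and disable it at boot (standard RHEL 6 service management):

service ntpd stop
chkconfig ntpd off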

 

3.9 Configure NOZEROCONF

[root@cbdps01 ~]# cat <<EOF>>/etc/sysconfig/network

NOZEROCONF=yes

EOF

 

3.10 Kernel Parameters

Edit /etc/sysctl.conf. Note: the following three parameters must be commented out.

#net.bridge.bridge-nf-call-ip6tables = 0

#net.bridge.bridge-nf-call-iptables = 0

#net.bridge.bridge-nf-call-arptables = 0
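
If those entries exist uncommented in /etc/sysctl.conf, a one-liner can comment them out (the & in sed re-inserts the matched text):

sed -i 's/^net.bridge.bridge-nf-call/#&/' /etc/sysctl.conf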

 

cat <<EOF>>/etc/sysctl.conf

fs.aio-max-nr = 4194304

fs.file-max = 6815744

kernel.shmall = 1980560

kernel.shmmax = 6489899008

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default = 262144

net.core.rmem_max = 4194304

net.core.wmem_default = 262144

net.core.wmem_max = 1048576

net.ipv4.ipfrag_high_thresh = 16777216

net.ipv4.ipfrag_low_thresh = 15728640

kernel.randomize_va_space = 0

vm.swappiness = 10

vm.min_free_kbytes = 524288

kernel.panic_on_oops = 1

net.ipv4.conf.eth1.rp_filter = 1

net.ipv4.conf.eth2.rp_filter = 2

EOF

 

# /sbin/sysctl -p

 

3.11 Create Users, Groups, and Directories

Note:

A. Before creating the users and groups, confirm that uid/gid 1000 (and the other ids below) are not already in use — see the check after this list;

B. In production, some systems have been found without the oper group.
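
A quick occupancy check (prints any existing user or group already holding id 1000; repeat for the other ids as needed):

awk -F: '$3==1000' /etc/passwd
awk -F: '$3==1000' /etc/group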

groupadd -g 1000 oinstall

groupadd -g 1001 dba

groupadd -g 1002 oper

groupadd -g 1010 asmadmin

groupadd -g 1011 asmdba

groupadd -g 1012 asmoper

useradd -u 1000 -g oinstall -G dba,oper,asmdba  -m -d /home/oracle oracle

useradd -u 1001 -g oinstall -G asmadmin,asmdba,asmoper,dba,oper -m -d /home/grid grid

echo "oracle4U"| passwd --stdin oracle

echo "grid4U"| passwd --stdin grid

 

usermod -a -G oinstall oracle

usermod -a -G oinstall grid

 

mkdir -p /u01/app/oracle

mkdir -p /u01/app/oraInventory

mkdir -p /u01/app/12.2.0.1/grid

mkdir -p /u01/app/grid

mkdir -p /u01/app/oracle/product/12.2.0.1/db_1

chown -R grid:oinstall /u01

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01

 

Note: the chown/chmod commands above are the critical part; if they are omitted, the pre-install check for RAC raises the following warning.

a. PRVG-10467 : The default Oracle Inventory group could not be determined.

3.12 Resource Limits

Configure limits.conf

[root@cbdps01 ~]# cat <<EOF>>/etc/security/limits.conf

grid soft core 0

grid hard core 0

grid soft nproc 400000

grid hard nproc 400000

grid soft memlock 711656100

grid hard memlock 711656100

grid soft nofile 400000

grid hard nofile 400000

grid soft stack 10240

grid hard stack 32768

 

oracle soft core 0

oracle hard core 0

oracle soft nproc 400000

oracle hard nproc 400000

oracle soft memlock unlimited

oracle hard memlock unlimited

oracle soft nofile 400000

oracle hard nofile 400000

oracle soft stack  10240

oracle hard stack  32768

EOF

 

Note: the stack limits above are the updated part; if they are not set, the pre-install check for RAC raises the following warning.

a. "Soft Limit: maximum stack size" could not be fixed on nodes "cbdps02,cbdps01"

 

3.13 Add the pam_limits.so Module

-- Edit /etc/pam.d/login

[root@cbdps01 ~]# cat <<EOF>>/etc/pam.d/login

session required pam_limits.so

session required /lib64/security/pam_limits.so

EOF

 

3.14 Environment Variables

grid user:

$ cat <<EOF>>/home/grid/.bash_profile

# Oracle Grid 12c Environment

export TEMP=/tmp

export TMPDIR=\$TEMP

export ORACLE_SID=+ASM1

export ORACLE_BASE=/u01/app/grid

export ORACLE_HOME=/u01/app/12.2.0.1/grid

export LIBPATH=\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32:.

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/jdk/jre/lib:\$ORACLE_HOME/network/lib:\$ORACLE_HOME/rdbms/lib

export CLASSPATH=\$ORACLE_HOME/jre:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib:\$ORACLE_HOME/network/jlib

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$HOME/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:.

 

umask 022

if [ \$USER = "grid" ]; then

if [ \$SHELL = "/bin/ksh" ]; then

ulimit -p 16384

ulimit -n 65536

else

ulimit -u 16384 -n 65536

fi

fi

alias sas='sqlplus / as sysasm'

#stty erase ^H

EOF

 

$ source .bash_profile

 

Note: on node 2, set ORACLE_SID to +ASM2.
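
One way to make that change on node 2 (illustrative; the analogous edit in the oracle profile below changes cbdps1 to cbdps2):

[grid@cbdps02 ~]$ sed -i 's/ORACLE_SID=+ASM1/ORACLE_SID=+ASM2/' /home/grid/.bash_profile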

 

oracle user:

$ cat <<EOF>>/home/oracle/.bash_profile

# Oracle 12c oracle Environment

export TEMP=/tmp

export TMPDIR=\$TEMP

export ORACLE_SID=cbdps1

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=/u01/app/oracle/product/12.2.0.1/db_1

#export NLS_LANG=AMERICAN_AMERICA.AL32UTF8

export LIBPATH=\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/jdk/jre/lib:\$ORACLE_HOME/network/lib:\$ORACLE_HOME/rdbms/lib

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export CLASSPATH=\$ORACLE_HOME/jre:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib:\$ORACLE_HOME/network/jlib

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$HOME/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:.

 

umask 022

if [ \$USER = "oracle" ]; then

if [ \$SHELL = "/bin/ksh" ]; then

ulimit -p 16384

ulimit -n 65536

else

ulimit -u 16384 -n 65536

fi

fi

alias sas='sqlplus / as sysdba'

#stty erase ^H

EOF

 

$ source .bash_profile

 

Note: on node 2, set ORACLE_SID to cbdps2.

 

3.15 Make crsctl Available to root

Allow root to invoke the GRID commands:

[root@cbdps01 /]# cat >> /etc/profile <<EOF

export PATH=/u01/app/12.2.0.1/grid/bin:\$PATH

EOF

 

3.16 Edit /etc/profile

# cat <<EOF>>/etc/profile

    if [ \$USER = "oracle" ] || [ \$USER = "grid" ]; then

            if [ \$SHELL = "/bin/ksh" ]; then

                  ulimit -p 16384

                  ulimit -n 65536

            else

                  ulimit -u 16384 -n 65536

            fi

            umask 022

    fi

EOF

 

# source /etc/profile

 

3.17 Disable Transparent HugePages and NUMA

[root@cbdps01 ~]# vi /etc/sysctl.conf

Add the following:

vm.nr_hugepages=0

 

[root@cbdps01 ~]# vi /etc/rc.local

Append at the end:

echo never > /sys/kernel/mm/transparent_hugepage/enabled

 

To disable immediately (without reboot):

echo never > /sys/kernel/mm/transparent_hugepage/enabled

Output after reboot:

[root@cbdps01 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]
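
The heading also mentions NUMA, but this walkthrough leaves NUMA enabled (see section 7). For reference, on RHEL 6 NUMA is commonly disabled by appending numa=off to the kernel line of /boot/grub/grub.conf and rebooting (illustrative; the kernel version and root device vary per system):

kernel /vmlinuz-2.6.32-431.el6.x86_64 ro root=... numa=off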

4. Storage Configuration

4.1 Add Shared Storage

Shared storage can be provided by third-party software, shared through VMware Workstation, or served via iSCSI network storage. Here VMware Workstation is used to simulate the shared storage.

Add the shared disks.

When adding the shared disks on node 2, choose "Use an existing virtual disk".

Add the shared disks to both nodes in turn, then append the following to each VM's .vmx file:

disk.locking = "FALSE"

disk.EnableUUID = "TRUE"
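
For reference, the shared disks can also be pre-created on the host with VMware's vmware-vdiskmanager tool; shared disks should be preallocated (a sketch; the file name and size are illustrative):

vmware-vdiskmanager -c -s 40GB -a lsilogic -t 2 sharedisk01.vmdk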

After adding the disks, the block devices look as follows:

[root@cbdps01 ~]# lsblk

NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda      8:0    0  100G  0 disk

├─sda1   8:1    0    1G  0 part /boot

├─sda2   8:2    0    8G  0 part [SWAP]

└─sda3   8:3    0   91G  0 part /

sdc      8:32   0   40G  0 disk

sdb      8:16   0   40G  0 disk

sr0     11:0    1  3.6G  0 rom

 

[root@cbdps01 ~]# fdisk /dev/sdb

[root@cbdps01 ~]# fdisk /dev/sdc
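
Inside fdisk, a single primary partition covering the whole disk is typically created with this key sequence (illustrative):

n  (new partition)
p  (primary)
1  (partition number)
<Enter>  (accept default first cylinder)
<Enter>  (accept default last cylinder)
w  (write the table and exit)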

[root@cbdps01 ~]# partprobe

[root@cbdps02 ~]# partprobe

[root@cbdps01 ~]# lsblk

NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda      8:0    0  100G  0 disk

├─sda1   8:1    0    1G  0 part /boot

├─sda2   8:2    0    8G  0 part [SWAP]

└─sda3   8:3    0   91G  0 part /

sdb      8:16   0   40G  0 disk

└─sdb1   8:17   0   40G  0 part

sdc      8:32   0   40G  0 disk

└─sdc1   8:33   0   40G  0 part

sr0     11:0    1  3.6G  0 rom  /mnt

 

4.2 I/O Scheduler

Note: execute on both nodes.

Method 1: temporary (lost at reboot):

# echo deadline > /sys/block/sdb/queue/scheduler

# echo deadline > /sys/block/sdc/queue/scheduler

 

Method 2: apply automatically at boot:

[root@cbdps01 ~]# vi /etc/rc.local

Append at the end:

echo deadline > /sys/block/sdb/queue/scheduler

echo deadline > /sys/block/sdc/queue/scheduler

 

Note: adjust the disk names sdb and sdc to your environment.

 

Note: if the I/O scheduler is not configured, the installer raises a warning during the pre-install checks.

4.3 Bind Disks as Raw Devices

Note: execute on both nodes.

[root@cbdps01 rules.d]# vi /etc/udev/rules.d/60-raw.rules

ACTION=="add", KERNEL=="sdb1", RUN+="/bin/raw /dev/raw/raw1 %N"

ACTION=="add", KERNEL=="sdc1", RUN+="/bin/raw /dev/raw/raw2 %N"

KERNEL=="raw[1-2]*", OWNER="grid", GROUP="asmadmin", MODE="0660"

 

[root@cbdps01 rules.d]# start_udev

[root@cbdps01 ~]# lsblk

NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda      8:0    0  100G  0 disk

├─sda1   8:1    0    1G  0 part /boot

├─sda2   8:2    0    8G  0 part [SWAP]

└─sda3   8:3    0   91G  0 part /

sdb      8:16   0   40G  0 disk

└─sdb1   8:17   0   40G  0 part

sdc      8:32   0   40G  0 disk

└─sdc1   8:33   0   40G  0 part

sr0     11:0    1  3.6G  0 rom    

[root@cbdps01 rules.d]# ls -tlr /dev/raw*

total 0

crw-rw---- 1 root disk     162, 0 Feb  1 22:51 rawctl

crw-rw---- 1 grid asmadmin 162, 1 Feb  1 22:51 raw1

crw-rw---- 1 grid asmadmin 162, 2 Feb  1 22:51 raw2

 

[root@cbdps02 rules.d]# ls -ltr /dev/raw*

total 0

crw-rw---- 1 root disk     162, 0 Feb  1 22:51 rawctl

crw-rw---- 1 grid asmadmin 162, 2 Feb  1 22:51 raw2

crw-rw---- 1 grid asmadmin 162, 1 Feb  1 22:51 raw1

 

Note: when disks are bound as raw devices, additionally enabling multipathing can cause raw1 and raw2 to misbehave.

 

5. Installation Media

5.1 Upload the Installation Media

Note: this step runs on node 1 only.

-- Create dedicated directories for the installation files

# mkdir -p /u01/setup/{db,grid}

 

-- Upload the files

sftp> lcd F:\installmedium\12c

sftp> cd /u01/setup/grid

sftp> put LINUX.X64_122010_grid_home.zip

sftp> cd /u01/setup/db

sftp> put LINUX.X64_122010_db_home.zip

 

5.2 Unpack the Installation Media

Note: node 1 only. Unlike earlier releases, the 12c R2 grid zip is itself part of the software image and must be unpacked directly into the Grid Home.

 

grid package

# chown -R grid:oinstall /u01/setup/grid

# su - grid -c "unzip -q /u01/setup/grid/LINUX.X64_122010_grid_home.zip -d /u01/app/12.2.0.1/grid/"

 

oracle package

# chown -R oracle:oinstall /u01/setup/db

# su - oracle -c "unzip -q /u01/setup/db/LINUX.X64_122010_db_home.zip -d /u01/setup/db"

 

5.3 Install cvuqdisk

-- Install the cvuqdisk package

[root@cbdps01 ~]# cd /u01/app/12.2.0.1/grid/cv/rpm

[root@cbdps01 rpm]# CVUQDISK_GRP=oinstall; export CVUQDISK_GRP

[root@cbdps01 rpm]# rpm -ivh cvuqdisk-1.0.10-1.rpm

Preparing...                ########################################### [100%]

   1:cvuqdisk               ########################################### [100%]

[root@cbdps01 rpm]# scp cvuqdisk-1.0.10-1.rpm cbdps02:/tmp

[root@cbdps02 ~]# CVUQDISK_GRP=oinstall; export CVUQDISK_GRP

[root@cbdps02 ~]# rpm -ivh /tmp/cvuqdisk-1.0.10-1.rpm

 

6. SSH Equivalence Between the RAC Nodes

Note: node 1 only.

6.1 Configure SSH Equivalence

[root@cbdps01 ~]# su - grid

[grid@cbdps01 ~]$ cd $ORACLE_HOME/oui/prov/resources/scripts

[grid@cbdps01 scripts]$ ./sshUserSetup.sh -user grid -hosts "cbdps01 cbdps02" -advanced -noPromptPassphrase

[grid@cbdps01 scripts]$ ./sshUserSetup.sh -user oracle -hosts "cbdps01 cbdps02" -advanced -noPromptPassphrase

 

Note: the equivalence script can be run as either grid or root here.

 

6.2 Test SSH Equivalence

Note: test from both nodes.

grid user

[grid@cbdps01 ~]$ ssh cbdps01 date

Mon Jan 27 16:00:23 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02 date

Mon Jan 27 16:00:28 CST 2025

[grid@cbdps01 ~]$ ssh cbdps01-priv date

The authenticity of host 'cbdps01-priv (192.168.78.170)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps01-priv,192.168.78.170' (ECDSA) to the list of known hosts.

Mon Jan 27 16:00:38 CST 2025

[grid@cbdps01 ~]$ ssh cbdps01-priv date

Mon Jan 27 16:00:41 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02-priv date

The authenticity of host 'cbdps02-priv (192.168.78.171)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps02-priv,192.168.78.171' (ECDSA) to the list of known hosts.

Mon Jan 27 16:00:55 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02-priv date

Mon Jan 27 16:00:57 CST 2025

 

oracle user

[oracle@cbdps01 ~]$ ssh cbdps01 date

Mon Jan 27 16:01:38 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02 date

Mon Jan 27 16:01:43 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps01-priv date

The authenticity of host 'cbdps01-priv (192.168.78.170)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps01-priv,192.168.78.170' (ECDSA) to the list of known hosts.

Mon Jan 27 16:01:50 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps01-priv date

Mon Jan 27 16:01:52 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02-priv date

The authenticity of host 'cbdps02-priv (192.168.78.171)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps02-priv,192.168.78.171' (ECDSA) to the list of known hosts.

Mon Jan 27 16:01:59 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02-priv date

Mon Jan 27 16:02:00 CST 2025

 

Note: the goal is that no yes/no host-key prompt appears on any of these connections.
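
To avoid the interactive host-key prompts altogether, the keys can be pre-seeded (a sketch; run as both grid and oracle on each node):

ssh-keyscan cbdps01 cbdps02 cbdps01-priv cbdps02-priv >> ~/.ssh/known_hosts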

7. Verify SELinux, I/O Scheduler, Transparent HugePages, and NUMA

Note: after a reboot, verify that SELinux and the related settings took effect. The output below was captured after reboot.

[root@cbdps02 ~]# getenforce

Disabled

[root@cbdps01 ~]# getenforce

Disabled

 

[root@cbdps01 ~]# cat /sys/block/sdb/queue/scheduler

noop anticipatory [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdb/queue/scheduler

noop anticipatory [deadline] cfq

 

[root@cbdps01 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

[root@cbdps02 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

Note: NUMA was not disabled, so it is not checked here.

 

8. Install Grid via the GUI

Note: a system reboot is recommended before installing. The GUI GI installation runs on node 1 only.

[grid@cbdps01 ~]$ cd $ORACLE_HOME

[grid@cbdps01 grid]$ export DISPLAY=192.168.133.1:0.0

[grid@cbdps01 grid]$ ./gridSetup.sh

Launching Oracle Grid Infrastructure Setup Wizard...

After adding node 2's public hostname and virtual hostname, click "SSH connectivity" and enter the grid user password; the user must have been created with identical passwords on both nodes. Run Setup first, then Test, to establish the equivalence.

The corresponding dialog appears after clicking "Setup".

The corresponding dialog appears after clicking "Test".

Make sure each NIC is mapped to its proper IP subnet. In 12c the heartbeat subnet defaults to "ASM & Private", which also carries the ASM instance traffic.

Create the OCR voting-disk group. Note: the "High" redundancy type requires at least 5 disks.



Run on node 1:

# /u01/app/oraInventory/orainstRoot.sh

Run on node 2:

# /u01/app/oraInventory/orainstRoot.sh

 

Run on node 1:

# /u01/app/12.2.0.1/grid/root.sh

Run on node 2:

# /u01/app/12.2.0.1/grid/root.sh

 

Note: after root.sh completes, the ownership of /u01 changes to the following.

drwxr-xr-x.   4 root oinstall  4096 Feb  5 11:00 u01

 

Ownership before running root.sh:

drwxrwxr-x.   3 grid oinstall  4096 Feb  5 10:28 u01

Execution logs

Node 1

[root@cbdps01 ~]# /u01/app/12.2.0.1/grid/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= grid

    ORACLE_HOME=  /u01/app/12.2.0.1/grid

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

   Copying dbhome to /usr/local/bin ...

   Copying oraenv to /usr/local/bin ...

   Copying coraenv to /usr/local/bin ...

 

 

Creating /etc/oratab file...

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Relinking oracle with rac_on option

Using configuration parameter file: /u01/app/12.2.0.1/grid/crs/install/crsconfig_params

The log of current session can be found at:

  /u01/app/grid/crsdata/cbdps01/crsconfig/rootcrs_cbdps01_2025-02-02_11-39-57PM.log

2025/02/02 23:39:59 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.

2025/02/02 23:39:59 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.

2025/02/02 23:40:22 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.

2025/02/02 23:40:22 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.

2025/02/02 23:40:25 CLSRSC-363: User ignored prerequisites during installation

2025/02/02 23:40:25 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.

2025/02/02 23:40:26 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.

2025/02/02 23:40:27 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.

2025/02/02 23:40:32 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.

2025/02/02 23:40:33 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.

2025/02/02 23:40:33 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.

2025/02/02 23:40:50 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.

2025/02/02 23:40:55 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.

2025/02/02 23:40:55 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.

2025/02/02 23:40:59 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.

2025/02/02 23:41:14 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.conf'

2025/02/02 23:41:41 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.

2025/02/02 23:41:45 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

2025/02/02 23:42:12 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.

2025/02/02 23:42:16 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps01'

CRS-2676: Start of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.evmd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps01'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps01'

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps01'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps01'

CRS-2676: Start of 'ora.diskmon' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps01' succeeded

 

Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-250202PM114251.log for details.

 

 

2025/02/02 23:43:37 CLSRSC-482: Running command: '/u01/app/12.2.0.1/grid/bin/ocrconfig -upgrade grid oinstall'

CRS-2672: Attempting to start 'ora.crf' on 'cbdps01'

CRS-2672: Attempting to start 'ora.storage' on 'cbdps01'

CRS-2676: Start of 'ora.storage' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.crf' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps01'

CRS-2676: Start of 'ora.crsd' on 'cbdps01' succeeded

CRS-4256: Updating the profile

Successful addition of voting disk 3bb6eeb302334fcabf60b8c367f1e592.

Successfully replaced voting disk group with +OCR.

CRS-4256: Updating the profile

CRS-4266: Voting file(s) successfully replaced

##  STATE    File Universal Id                File Name Disk group

--  -----    -----------------                --------- ---------

 1. ONLINE   3bb6eeb302334fcabf60b8c367f1e592 (/dev/raw/raw1) [OCR]

Located 1 voting disk(s).

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2673: Attempting to stop 'ora.crsd' on 'cbdps01'

CRS-2677: Stop of 'ora.crsd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.storage' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.crf' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.gpnpd' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.mdnsd' on 'cbdps01'

CRS-2677: Stop of 'ora.drivers.acfs' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.crf' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.storage' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.asm' on 'cbdps01'

CRS-2677: Stop of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.asm' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'cbdps01'

CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.ctssd' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.evmd' on 'cbdps01'

CRS-2677: Stop of 'ora.ctssd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.evmd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.cssd' on 'cbdps01'

CRS-2677: Stop of 'ora.cssd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.gipcd' on 'cbdps01'

CRS-2677: Stop of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

2025/02/02 23:44:23 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.

CRS-4123: Starting Oracle High Availability Services-managed resources

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps01'

CRS-2676: Start of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.evmd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps01'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps01'

CRS-2676: Start of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps01'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps01'

CRS-2676: Start of 'ora.diskmon' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'cbdps01'

CRS-2672: Attempting to start 'ora.ctssd' on 'cbdps01'

CRS-2676: Start of 'ora.ctssd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps01'

CRS-2676: Start of 'ora.asm' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.storage' on 'cbdps01'

CRS-2676: Start of 'ora.storage' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crf' on 'cbdps01'

CRS-2676: Start of 'ora.crf' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps01'

CRS-2676: Start of 'ora.crsd' on 'cbdps01' succeeded

CRS-6023: Starting Oracle Cluster Ready Services-managed resources

CRS-6017: Processing resource auto-start for servers: cbdps01

CRS-6016: Resource auto-start has completed for server cbdps01

CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources

CRS-4123: Oracle High Availability Services has been started.

2025/02/02 23:45:50 CLSRSC-343: Successfully started Oracle Clusterware stack

2025/02/02 23:45:50 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps01'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps01'

CRS-2676: Start of 'ora.asm' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.OCR.dg' on 'cbdps01'

CRS-2676: Start of 'ora.OCR.dg' on 'cbdps01' succeeded

2025/02/02 23:47:03 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.

2025/02/02 23:47:21 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

 

Node 2

[root@cbdps02 ~]# /u01/app/oraInventory/orainstRoot.sh

Changing permissions of /u01/app/oraInventory.

Adding read,write permissions for group.

Removing read,write,execute permissions for world.

 

Changing groupname of /u01/app/oraInventory to oinstall.

The execution of the script is complete.

 

[root@cbdps02 ~]# /u01/app/12.2.0.1/grid/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= grid

    ORACLE_HOME=  /u01/app/12.2.0.1/grid

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

   Copying dbhome to /usr/local/bin ...

   Copying oraenv to /usr/local/bin ...

   Copying coraenv to /usr/local/bin ...

 

 

Creating /etc/oratab file...

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Relinking oracle with rac_on option

Using configuration parameter file: /u01/app/12.2.0.1/grid/crs/install/crsconfig_params

The log of current session can be found at:

  /u01/app/grid/crsdata/cbdps02/crsconfig/rootcrs_cbdps02_2025-02-02_11-47-53PM.log

2025/02/02 23:47:55 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.

2025/02/02 23:47:55 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.

2025/02/02 23:48:18 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.

2025/02/02 23:48:18 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.

2025/02/02 23:48:20 CLSRSC-363: User ignored prerequisites during installation

2025/02/02 23:48:20 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.

2025/02/02 23:48:21 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.

2025/02/02 23:48:21 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.

2025/02/02 23:48:23 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.

2025/02/02 23:48:24 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.

2025/02/02 23:48:24 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.

2025/02/02 23:48:26 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.

2025/02/02 23:48:27 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.

2025/02/02 23:48:27 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.

2025/02/02 23:48:29 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.

2025/02/02 23:48:44 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.conf'

2025/02/02 23:49:08 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.

2025/02/02 23:49:09 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

2025/02/02 23:49:34 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.

2025/02/02 23:49:35 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'cbdps02'

CRS-2677: Stop of 'ora.drivers.acfs' on 'cbdps02' succeeded

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

2025/02/02 23:49:53 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.

CRS-4123: Starting Oracle High Availability Services-managed resources

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps02'

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps02'

CRS-2676: Start of 'ora.mdnsd' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.evmd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps02'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps02'

CRS-2676: Start of 'ora.gipcd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps02'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps02'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps02'

CRS-2676: Start of 'ora.diskmon' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'cbdps02'

CRS-2672: Attempting to start 'ora.ctssd' on 'cbdps02'

CRS-2676: Start of 'ora.ctssd' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps02'

CRS-2676: Start of 'ora.asm' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.storage' on 'cbdps02'

CRS-2676: Start of 'ora.storage' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.crf' on 'cbdps02'

CRS-2676: Start of 'ora.crf' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps02'

CRS-2676: Start of 'ora.crsd' on 'cbdps02' succeeded

CRS-6017: Processing resource auto-start for servers: cbdps02

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps02'

CRS-2672: Attempting to start 'ora.net1.network' on 'cbdps02'

CRS-2676: Start of 'ora.net1.network' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.ons' on 'cbdps02'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps02'

CRS-2676: Start of 'ora.ons' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.asm' on 'cbdps02' succeeded

CRS-6016: Resource auto-start has completed for server cbdps02

CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources

CRS-4123: Oracle High Availability Services has been started.

2025/02/02 23:51:22 CLSRSC-343: Successfully started Oracle Clusterware stack

2025/02/02 23:51:22 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

2025/02/02 23:51:33 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.

2025/02/02 23:51:41 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

[grid@cbdps01 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.253.194 192.

                                                             168.78.170,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

[grid@cbdps01 ~]$ crsctl stat res -t -init

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

ora.cluster_interconnect.haip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.crf

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.crsd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cssd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cssdmonitor

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.ctssd

      1        ONLINE  ONLINE       cbdps01                  ACTIVE:0,STABLE

ora.diskmon

      1        OFFLINE OFFLINE                               STABLE

ora.drivers.acfs

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.evmd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.gipcd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.gpnpd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mdnsd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.storage

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

 

9. Test Cluster Failover

9.1 Test Node 2

-- Reboot node 2 and check the status from node 1.

[grid@cbdps01 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.253.194 192.

                                                             168.78.170,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  OFFLINE                               STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  INTERMEDIATE cbdps01                  FAILED OVER,STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

9.2 Test Node 1

-- Reboot node 1 and check the status from node 2.

[grid@cbdps02 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  OFFLINE                               STABLE

ora.MGMTLSNR

      1        ONLINE  OFFLINE      cbdps02                  169.254.253.194 192.

                                                             168.78.170,STARTING

ora.asm

      1        ONLINE  OFFLINE                               STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  OFFLINE                               STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  OFFLINE                               STABLE

ora.mgmtdb

      1        ONLINE  OFFLINE                               Instance Shutdown,ST

                                                             ABLE

ora.qosmserver

      1        ONLINE  OFFLINE                               STABLE

ora.scan1.vip

      1        ONLINE  OFFLINE                               STABLE

--------------------------------------------------------------------------------

 

10. Install the DB Software

[oracle@cbdps01 ~]$ cd /u01/setup/db/database/

[oracle@cbdps01 database]$ export DISPLAY=192.168.133.1:0.0

[oracle@cbdps01 database]$ ./runInstaller

Starting Oracle Universal Installer...

 

Checking Temp space: must be greater than 500 MB.   Actual 64628 MB    Passed

Checking swap space: must be greater than 150 MB.   Actual 8191 MB    Passed

Checking monitor: must be configured to display at least 256 colors.    Actual 16777216    Passed

Preparing to launch Oracle Universal Installer from /tmp/OraInstall2025-02-03_09-38-14AM. Please wait ...[oracle@cbdps01 database]$ 

Click "SSH connectivity", enter the oracle user password, then click "Setup" and "Test"; the corresponding dialogs appear in turn.

Note: the following check items may be ignored during installation (any other failed checks must not be ignored).

resolv.conf Integrity

Single Client Access Name(SCAN)

[root@cbdps01 ~]# /u01/app/oracle/product/12.2.0.1/db_1/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= oracle

    ORACLE_HOME=  /u01/app/oracle/product/12.2.0.1/db_1

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

The contents of "dbhome" have not changed. No need to overwrite.

The contents of "oraenv" have not changed. No need to overwrite.

The contents of "coraenv" have not changed. No need to overwrite.

 

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

 

Note: run the root.sh script as root on both nodes.

Note: the DB software installation is now complete.

 

11. Configure ASM Disk Groups

Note: run on node 1. Create the DATA disk group with external redundancy (if external is used in production, the underlying storage must be RAID-protected); more disks can be added later as needed.

[grid@cbdps01 ~]$ export DISPLAY=192.168.133.1:0.0

[grid@cbdps01 ~]$ asmca
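
asmca also offers a silent mode for creating a disk group; a sketch assuming /dev/raw/raw2 is the intended DATA disk (verify the flags against asmca -help for your release):

[grid@cbdps01 ~]$ asmca -silent -createDiskGroup -diskGroupName DATA -disk '/dev/raw/raw2' -redundancy EXTERNAL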

[grid@cbdps01 ~]$ asmcmd

ASMCMD> lsdg

State    Type    Rebal  Sector  Logical_Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name

MOUNTED  EXTERN  N         512             512   4096  4194304     40952    40820                0           40820              0             N  DATA/

MOUNTED  EXTERN  N         512             512   4096  4194304     40952     6932                0            6932              0             Y  OCR/

 

As shown, the new DATA disk group has been created and successfully mounted.

 

12. Patching

12.1 Patch Status Before Upgrading

[oracle@cbdps01 ~]$ opatch lspatches

There are no Interim patches installed in this Oracle Home "/u01/app/oracle/product/12.2.0.1/db_1".

 

OPatch succeeded.

 

[grid@cbdps01 ~]$ opatch lspatches

There are no Interim patches installed in this Oracle Home "/u01/app/12.2.0.1/grid".

 

OPatch succeeded.

 

12.2 Apply the Patches

12.2.1 Upgrade OPatch

Note: OPatch must be upgraded on both node 1 and node 2 to version 12.2.0.1.28 or later; 12.2.0.1.41 is installed here.

Unzip p6880880_122010_Linux-x86-64.zip and replace ORACLE_HOME/OPatch and GRID_HOME/OPatch on both nodes.

Upgrade OPatch in the grid home

[grid@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.6

 

OPatch succeeded.

[oracle@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.6

 

OPatch succeeded.

 

sftp> cd /tmp

sftp> lcd F:\installmedium\12c\OPatch\12.2.0.1.41

sftp> put p6880880_122010_Linux-x86-64.zip

[root@cbdps01 ~]# export ORACLE_HOME=/u01/app/12.2.0.1/grid

[root@cbdps01 ~]# mv $ORACLE_HOME/OPatch $ORACLE_HOME/OPatch.bak

[root@cbdps01 ~]# cd /tmp

[root@cbdps01 tmp]# unzip -q -o p6880880_122010_Linux-x86-64.zip -d $ORACLE_HOME

[root@cbdps01 tmp]# cd $ORACLE_HOME

[root@cbdps01 grid]# chown grid:oinstall -R OPatch

[root@cbdps01 grid]# su - grid

Last login: Thu Nov 23 20:43:34 CST 2023 on pts/0

[grid@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

Upgrade OPatch in the oracle home

[grid@cbdps01 ~]$ su - oracle

[oracle@cbdps01 ~]$ mv $ORACLE_HOME/OPatch $ORACLE_HOME/OPatch.bak

[oracle@cbdps01 ~]$ cd /tmp

[oracle@cbdps01 tmp]$ unzip -q -o p6880880_122010_Linux-x86-64.zip -d $ORACLE_HOME

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

12.2.2 Apply the RU

Apply the RU patch (33583921) to both GI and DB, executing as root. Note: if a database instance has already been created, shut it down first.

12.2.2.1 Apply the GI RU

Note: both nodes must be patched.

[root@cbdps01 ~]# mkdir -p /u01/setup/RU

sftp> cd /u01/setup/RU

sftp> lcd F:\installmedium\12c\RU

sftp> put p33583921_122010_Linux-x86-64.zip

[root@cbdps01 ~]# chown -R grid:oinstall /u01/setup/RU

[root@cbdps01 ~]# su - grid

[grid@cbdps01 ~]$ cd /u01/setup/RU

[grid@cbdps01 RU]$ unzip -q p33583921_122010_Linux-x86-64.zip

 

Install the GI patch as root (the cluster databases do not need to be stopped)

[root@cbdps01 setup]# /u01/app/12.2.0.1/grid/OPatch/opatchauto apply /u01/setup/RU/33583921

 

OPatchauto session is initiated at Fri Feb  7 11:24:25 2025

 

System initialization log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchautodb/systemconfig2025-02-07_11-24-29AM.log.

 

Session log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/opatchauto2025-02-07_11-24-41AM.log

The id for this session is N7PK

 

Executing OPatch prereq operations to verify patch applicability on home /u01/app/12.2.0.1/grid

Patch applicability verified successfully on home /u01/app/12.2.0.1/grid

 

 

Executing patch validation checks on home /u01/app/12.2.0.1/grid

Patch validation checks successfully completed on home /u01/app/12.2.0.1/grid

 

 

Performing prepatch operations on CRS - bringing down CRS service on home /u01/app/12.2.0.1/grid

Prepatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-07_11-25-09AM.log

CRS service brought down successfully on home /u01/app/12.2.0.1/grid

 

 

Start applying binary patch on home /u01/app/12.2.0.1/grid

Binary patch applied successfully on home /u01/app/12.2.0.1/grid

 

 

Running rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

Successfully executed rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

 

 

Performing postpatch operations on CRS - starting CRS service on home /u01/app/12.2.0.1/grid

Postpatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-07_11-33-12AM.log

CRS service started successfully on home /u01/app/12.2.0.1/grid

 

OPatchAuto successful.

 

--------------------------------Summary--------------------------------

 

Patching is completed successfully. Please find the summary as follows:

 

Host:cbdps01

CRS Home:/u01/app/12.2.0.1/grid

Version:12.2.0.1.0

Summary:

 

==Following patches were SUCCESSFULLY applied:

 

Patch: /u01/setup/RU/33583921/26839277

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-07_11-26-15AM_1.log

 

Patch: /u01/setup/RU/33583921/33116894

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-07_11-26-15AM_1.log

 

Patch: /u01/setup/RU/33583921/33587128

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-07_11-26-15AM_1.log

 

Patch: /u01/setup/RU/33583921/33610989

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-07_11-26-15AM_1.log

 

Patch: /u01/setup/RU/33583921/33678030

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-07_11-26-15AM_1.log

 

 

 

OPatchauto session completed at Fri Feb  7 11:43:38 2025

Time taken to complete the session 19 minutes, 13 seconds

 

[grid@cbdps01 ~]$ opatch lspatches

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33610989;TOMCAT RELEASE UPDATE 12.2.0.1.0(ID:RELEASE) (33610989)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

33116894;ACFS JUL 2021 RELEASE UPDATE 12.2.0.1.210720 (33116894)

26839277;DBWLM RELEASE UPDATE 12.2.0.1.0(ID:170913) (26839277)

 

OPatch succeeded.

 

12.2.2.2 Apply the DB RU

Install the DB patch as root (the cluster stays up; if an oracle instance is running from this home, stop the instance first)

[root@cbdps01 ~]# chown -R oracle:oinstall /u01/setup/RU/33583921

12.2.2.2.1 OPatch Conflict Check

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /u01/setup/RU/33583921/33587128

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /u01/setup/RU/33583921/33678030

 

12.2.2.2.2 OPatch System Space Check

[oracle@cbdps01 ~]$ vi /tmp/patch_list_dbhome.txt

/u01/setup/RU/33583921/33587128

/u01/setup/RU/33583921/33678030

 

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckSystemSpace -phBaseFile /tmp/patch_list_dbhome.txt

 

12.2.2.2.3 Apply the DB RU

[root@cbdps01 ~]# /u01/app/oracle/product/12.2.0.1/db_1/OPatch/opatchauto apply /u01/setup/RU/33583921/33587128 -oh /u01/app/oracle/product/12.2.0.1/db_1/

[root@cbdps01 ~]# /u01/app/oracle/product/12.2.0.1/db_1/OPatch/opatchauto apply /u01/setup/RU/33583921/33678030 -oh /u01/app/oracle/product/12.2.0.1/db_1/

 

Note: the two patches took roughly 12 minutes and 1 minute respectively; adjust the paths to your environment.

 

[oracle@cbdps01 ~]$ opatch lspatches

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue.

 

12.3 Install the OJVM RU

[root@cbdps01 ~]# mkdir /u01/setup/OJVM

sftp> cd /u01/setup/OJVM

sftp> lcd F:\installmedium\12c\RU

sftp> put p33561275_122010_Linux-x86-64.zip

[root@cbdps01 ~]# chown -R oracle:oinstall /u01/setup/OJVM

[root@cbdps01 ~]# su - oracle

[oracle@cbdps01 ~]$ opatch lsinventory

[oracle@cbdps01 ~]$ cd /u01/setup/OJVM

[oracle@cbdps01 OJVM]$ unzip -q p33561275_122010_Linux-x86-64.zip

[oracle@cbdps01 OJVM]$ cd 33561275

[oracle@cbdps01 33561275]$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

[oracle@cbdps01 33561275]$ $ORACLE_HOME/OPatch/opatch apply -silent

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_16-23-50PM_1.log

 

Verifying environment and performing prerequisite checks...

OPatch continues with these patches:   33561275 

 

Do you want to proceed? [y|n]

Y (auto-answered by -silent)

User Responded with: Y

All checks passed.

 

Please shutdown Oracle instances running out of this ORACLE_HOME on the local system.

(Oracle Home = '/u01/app/oracle/product/12.2.0.1/db_1')

 

 

Is the local system ready for patching? [y|n]

Y (auto-answered by -silent)

User Responded with: Y

 

Backing up files...

Applying interim patch '33561275' to OH '/u01/app/oracle/product/12.2.0.1/db_1'

 

Patching component oracle.javavm.server, 12.2.0.1.0...

 

Patching component oracle.javavm.server.core, 12.2.0.1.0...

 

Patching component oracle.rdbms.dbscripts, 12.2.0.1.0...

 

Patching component oracle.javavm.client, 12.2.0.1.0...

 

Patching component oracle.rdbms, 12.2.0.1.0...

 

Patching component oracle.dbjava.jdbc, 12.2.0.1.0...

 

Patching component oracle.dbjava.ic, 12.2.0.1.0...

Patch 33561275 successfully applied.

Log file location: /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_16-23-50PM_1.log

 

OPatch succeeded.

[oracle@cbdps01 33561275]$ opatch lsinventory

[oracle@cbdps01 33561275]$ opatch lspatches

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue.

 

12.4、Install DB One-off Patches

Upload the patch packages:

[root@cbdps01 ~]# mkdir -p /u01/setup/oneoff_patch/

sftp> cd /u01/setup/oneoff_patch/

sftp> lcd F:\installmedium\12c\RU

sftp> put p24921392_12201230117DBJAN2023RU_Linux-x86-64.zip

sftp> put p26878028_12201240416DBAPR2024RU_Linux-x86-64.zip

sftp> put p27873364_121020_Linux-x86-64.zip

sftp> put p27882764_122010_Linux-x86-64.zip

sftp> put p30666479_12201220118DBJAN2022RU_Linux-x86-64.zip

12.4.1、Install Patch 24921392

Note: For a RAC environment, shut down all the services (database, ASM, listeners, nodeapps, and CRS daemons) running from the Oracle home of the node you want to patch. After you patch this node, start the services on this node. Repeat this process for each of the other nodes of the Oracle RAC system. OPatch is used on only one node at a time.

[root@cbdps01 setup]# crsctl stop crs
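Before patching, it is worth confirming the stack is actually down on this node; for example:

[root@cbdps01 setup]# crsctl check crs

[root@cbdps01 setup]# ps -ef | grep -E 'ocssd|crsd|evmd' | grep -v grep

With CRS stopped, crsctl check crs should report that it cannot contact Oracle High Availability Services, and the grep should return nothing.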

[root@cbdps01 setup]# chown -R oracle:oinstall /u01/setup/oneoff_patch/

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p24921392_12201230117DBJAN2023RU_Linux-x86-64.zip

$ cd 24921392

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_17-50-57PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

The details are:

Interim patch 24921392  requires prerequisite patch(es) [34850184] which are not present in the Oracle Home.

Apply prerequisite patch(es) [34850184] before applying interim patch 24921392.

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Conflicts/Supersets for each patch are:

 

Patch : 24921392

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libgeneric12.a:sskgm.o

$ opatch apply

$ opatch lsinventory

 

Note: The prerequisite check for patch 24921392 found problems (a missing prerequisite patch and a conflict with the installed RU), so its installation is suspended.
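When several candidate one-off patches are in play, OPatch can also check them against one another in a single pass; a minimal sketch, assuming the unzipped patch directories all sit under /u01/setup/oneoff_patch:

$ opatch prereq CheckConflictAmongPatchesWithDetail -phBaseDir /u01/setup/oneoff_patch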

12.4.2、Install Patch 26878028

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p26878028_12201240416DBAPR2024RU_Linux-x86-64.zip

$ cd 26878028

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_17-59-23PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

The details are:

Interim patch 26878028  requires prerequisite patch(es) [36325581] which are not present in the Oracle Home.

Apply prerequisite patch(es) [36325581] before applying interim patch 26878028.

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Conflicts/Supersets for each patch are:

 

Patch : 26878028

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libgeneric12.a:kgl.o

 

OPatch succeeded.

 

$ opatch apply

$ opatch lsinventory

 

Note: The prerequisite check for patch 26878028 found a conflict, so its installation is suspended.

 

12.4.3、Install Patch 27873364

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p27873364_121020_Linux-x86-64.zip

$ cd 27873364

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_18-09-28PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

ZOP-40: The patch(es) has conflicts with other patches installed in the Oracle Home (or) among themselves.

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Following patches have conflicts. Please contact Oracle Support and get the merged patch of the patches :

27873364, 33587128

 

Conflicts/Supersets for each patch are:

 

Patch : 27873364

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libserver12.a:ktli.o

 

OPatch succeeded.

 

$ opatch apply

$ opatch lsinventory

 

Note: The prerequisite check for patch 27873364 found a conflict, so its installation is suspended.

 

12.4.4、Install Patch 27882764

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p27882764_122010_Linux-x86-64.zip

$ cd 27882764

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_18-16-27PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

ZOP-40: The patch(es) has conflicts with other patches installed in the Oracle Home (or) among themselves.

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Following patches have conflicts. Please contact Oracle Support and get the merged patch of the patches :

27882764, 33587128

 

Conflicts/Supersets for each patch are:

 

Patch : 27882764

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libserver12.a:ksu.o

 

OPatch succeeded.

 

$ opatch apply

$ opatch lsinventory

 

Note: The prerequisite check for patch 27882764 found a conflict, so its installation is suspended.

 

12.4.5、Install Patch 30666479

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p30666479_12201220118DBJAN2022RU_Linux-x86-64.zip

$ cd 30666479

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

$ opatch apply

$ opatch lsinventory

[oracle@cbdps01 30666479]$ opatch lspatches

30666479;

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: The Oracle patch list from one production database is shown below; patches 27882764, 27873364, 26878028, and 24921392 were presumably applied there before 33587128, which is why no conflicts were detected on that system.

[oracle@hisdb01 ~]$ opatch lspatches

30666479;

27882764;

27873364;

26878028;

24921392;

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

30118419;OCW Interim patch for 30118419

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)
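One way to verify the actual apply order on such a system is the timestamp OPatch records for each patch in the inventory, for example:

[oracle@hisdb01 ~]$ $ORACLE_HOME/OPatch/opatch lsinventory | grep -i "applied on"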

12.4.6、Start the Cluster on Node 1

[root@cbdps01 ~]# crsctl start crs
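It can take a few minutes for all daemons to come back up; the state can be polled before continuing, e.g.:

[root@cbdps01 ~]# crsctl check crs

[root@cbdps01 ~]# crsctl stat res -t -init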

 

Note: Perform the same steps on node 2 and finish them before moving on.

13、Create the Database with DBCA

Note: Run this step on node 1 only.

Open Xmanager and create the database through the dbca GUI, selecting ZHS16GBK as the database character set.

[oracle@cbdps01 ~]$ export DISPLAY=192.168.133.1:0.0

[oracle@cbdps01 ~]$ dbca
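If no X display is available, dbca can also be driven in silent mode. A rough sketch only — the template, disk group, and passwords below are illustrative and would need adapting to this environment:

[oracle@cbdps01 ~]$ dbca -silent -createDatabase \
  -templateName General_Purpose.dbc \
  -gdbName cbdps -sid cbdps \
  -databaseConfigType RAC -nodelist cbdps01,cbdps02 \
  -storageType ASM -diskGroupName DATA \
  -characterSet ZHS16GBK \
  -sysPassword <sys_password> -systemPassword <system_password>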

Note: At this point the Oracle 12.2.0.1 RAC database has been created successfully.

 

[grid@cbdps01 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.DATA.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps02                  169.254.240.165 192.

                                                             168.78.171,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps.db

      1        ONLINE  ONLINE       cbdps01                  Open,HOME=/u01/app/o

                                                             racle/product/12.2.0

                                                             .1/db_1,STABLE

      2        ONLINE  ONLINE       cbdps02                  Open,HOME=/u01/app/o

                                                             racle/product/12.2.0

                                                             .1/db_1,STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps02                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

--------------------------------------------------------------------------------

[grid@cbdps01 ~]$ crsctl stat res -t -init

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.asm

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cluster_interconnect.haip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.crf

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.crsd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cssd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cssdmonitor

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.ctssd

      1        ONLINE  ONLINE       cbdps01                  ACTIVE:0,STABLE

ora.diskmon

      1        OFFLINE OFFLINE                               STABLE

ora.drivers.acfs

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.evmd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.gipcd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.gpnpd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mdnsd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.storage

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

 

Note: All resources are healthy; this completes the installation of Oracle 12.2.0.1 GI & RAC on RHEL 6.5.
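As a final sanity check, both instances can be queried from SQL*Plus, for example:

[oracle@cbdps01 ~]$ sqlplus / as sysdba

SQL> select inst_id, instance_name, status from gv$instance;

Both instances should report OPEN.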

 

Special note: Some production systems run without the MGMTDB; based on the references below, installing this component is recommended.
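Whether the management database is configured can be checked from the grid user with srvctl, e.g.:

[grid@cbdps01 ~]$ srvctl status mgmtdb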

References:

https://www.cnblogs.com/aegis1019/p/8866756.html

https://cloud.tencent.com/developer/article/1431555

https://cloud.tencent.com/developer/article/1431536

https://cloud.tencent.com/developer/article/1431538

https://cloud.tencent.com/developer/user/1955618/search/article-Linux%E5%B9%B3%E5%8F%B0%20Oracle%2012cR2%20RAC


