
RHEL 7.9: Installing Oracle 12.2.0.1 RAC

Leo 2025-03-11

Topic: installing Oracle 12.2.0.1 RAC on RHEL 7.9.

Note: the shared disks are bound through ASMLIB.

1. Overall Planning

Cluster plan

Storage plan

Note: Oracle 12c RAC has minimum disk-group size requirements: at least 38,860 MB with "External" redundancy, at least 77,636 MB with "Normal", and at least 116,400 MB with "High".

Software package information

2. Network Configuration

Add one extra NIC to each of the two hosts for the cluster interconnect (heartbeat), as follows.

Node 1

[root@cbdps01 network-scripts]# cat ifcfg-ens33

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens33

UUID=c125a8cd-d0c5-4ef3-95df-17cceb1d71f5

DEVICE=ens33

ONBOOT=yes

IPADDR=192.168.133.170

HWADDR=00:50:56:37:16:5e

PREFIX=24

GATEWAY=192.168.133.2

DNS1=192.168.133.2

DOMAIN=192.168.133.2

IPV6_PRIVACY=no

[root@cbdps01 network-scripts]# cat ifcfg-ens37

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens37

UUID=f3c3a596-ae8a-49be-a371-254a219fec84

DEVICE=ens37

ONBOOT=yes

IPADDR=192.168.78.170

HWADDR=00:50:56:29:47:46

PREFIX=24

IPV6_PRIVACY=no

[root@cbdps01 network-scripts]# ifconfig

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.170  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::70d3:4333:ca57:c64e  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:37:16:5e  txqueuelen 1000  (Ethernet)

        RX packets 195  bytes 25477 (24.8 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 145  bytes 28749 (28.0 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.170  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::67a8:6d02:8ab5:41  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:29:47:46  txqueuelen 1000  (Ethernet)

        RX packets 15  bytes 3076 (3.0 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 30  bytes 4218 (4.1 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 60  bytes 5100 (4.9 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 60  bytes 5100 (4.9 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500

        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255

        ether 52:54:00:74:03:ed  txqueuelen 1000  (Ethernet)

        RX packets 0  bytes 0 (0.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 0  bytes 0 (0.0 B)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

[root@cbdps01 network-scripts]# nmcli con show

NAME    UUID                                  TYPE      DEVICE

ens33   c125a8cd-d0c5-4ef3-95df-17cceb1d71f5  ethernet  ens33 

ens37   f3c3a596-ae8a-49be-a371-254a219fec84  ethernet  ens37 

virbr0  084ace8e-1a93-4772-84d5-252e9719a3e7  bridge    virbr0

 

State after the root.sh script has run:

[root@cbdps01 ~]# ifconfig

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.170  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::70d3:4333:ca57:c64e  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:37:16:5e  txqueuelen 1000  (Ethernet)

        RX packets 381261  bytes 36652525 (34.9 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 8177015  bytes 22719473748 (21.1 GiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens33:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.172  netmask 255.255.255.0  broadcast 192.168.133.255

        ether 00:50:56:37:16:5e  txqueuelen 1000  (Ethernet)

 

ens33:3: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.175  netmask 255.255.255.0  broadcast 192.168.133.255

        ether 00:50:56:37:16:5e  txqueuelen 1000  (Ethernet)

 

ens33:4: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.176  netmask 255.255.255.0  broadcast 192.168.133.255

        ether 00:50:56:37:16:5e  txqueuelen 1000  (Ethernet)

 

ens37: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.170  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::67a8:6d02:8ab5:41  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:29:47:46  txqueuelen 1000  (Ethernet)

        RX packets 32888  bytes 33605137 (32.0 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 22589  bytes 18078846 (17.2 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 169.254.148.15  netmask 255.255.0.0  broadcast 169.254.255.255

        ether 00:50:56:29:47:46  txqueuelen 1000  (Ethernet)

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 16436

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 14564  bytes 53493220 (51.0 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 14564  bytes 53493220 (51.0 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

Node 2

[root@cbdps02 network-scripts]# cat ifcfg-ens33

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens33

UUID=0eda23ef-213a-4b48-aa74-c8f1e70f33e3

DEVICE=ens33

ONBOOT=yes

IPADDR=192.168.133.171

HWADDR=00:50:56:27:65:13

PREFIX=24

GATEWAY=192.168.133.2

DNS1=192.168.133.2

DOMAIN=192.168.133.2

IPV6_PRIVACY=no

[root@cbdps02 network-scripts]# cat ifcfg-ens37

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens37

UUID=de3160f7-841a-4756-8def-e88fa9b5dc83

DEVICE=ens37

ONBOOT=yes

IPADDR=192.168.78.171

HWADDR=00:50:56:27:84:b1

PREFIX=24

IPV6_PRIVACY=no

 

[root@cbdps02 ~]# ifconfig

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.171  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::6144:faf2:5861:b165  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:27:65:13  txqueuelen 1000  (Ethernet)

        RX packets 7892636  bytes 11846062244 (11.0 GiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 225966  bytes 18677960 (17.8 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.171  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::7420:7cdd:36d7:e11e  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:27:84:b1  txqueuelen 1000  (Ethernet)

        RX packets 19231  bytes 15978333 (15.2 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 26980  bytes 34251870 (32.6 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 16436

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 3618  bytes 2625855 (2.5 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 3618  bytes 2625855 (2.5 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

[root@cbdps02 network-scripts]# nmcli con show

NAME    UUID                                  TYPE      DEVICE

ens33   0eda23ef-213a-4b48-aa74-c8f1e70f33e3  ethernet  ens33 

ens37   de3160f7-841a-4756-8def-e88fa9b5dc83  ethernet  ens37 

virbr0  c2a4e0cd-e0ff-4fd6-ad4f-60f061748d26  bridge    virbr0

 

State after the root.sh script has run:

[root@cbdps02 ~]# ifconfig

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.171  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::6144:faf2:5861:b165  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:27:65:13  txqueuelen 1000  (Ethernet)

        RX packets 7892636  bytes 11846062244 (11.0 GiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 225966  bytes 18677960 (17.8 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens33:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.174  netmask 255.255.255.0  broadcast 192.168.133.255

        ether 00:50:56:27:65:13  txqueuelen 1000  (Ethernet)

 

ens33:2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.173  netmask 255.255.255.0  broadcast 192.168.133.255

        ether 00:50:56:27:65:13  txqueuelen 1000  (Ethernet)

 

ens37: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.171  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::7420:7cdd:36d7:e11e  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:27:84:b1  txqueuelen 1000  (Ethernet)

        RX packets 19231  bytes 15978333 (15.2 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 26980  bytes 34251870 (32.6 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 169.254.218.225  netmask 255.255.0.0  broadcast 169.254.255.255

        ether 00:50:56:27:84:b1  txqueuelen 1000  (Ethernet)

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 16436

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 3618  bytes 2625855 (2.5 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 3618  bytes 2625855 (2.5 MiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

3. Installation Preparation

Note: unless stated otherwise, steps 3.2 through 3.18 must be executed on both nodes.

3.1. System Version

[root@cbdps01 ~]# cat /etc/*release

NAME="Red Hat Enterprise Linux Server"

VERSION="7.9 (Maipo)"

ID="rhel"

ID_LIKE="fedora"

VARIANT="Server"

VARIANT_ID="server"

VERSION_ID="7.9"

PRETTY_NAME="Red Hat Enterprise Linux Server 7.9 (Maipo)"

ANSI_COLOR="0;31"

CPE_NAME="cpe:/o:redhat:enterprise_linux:7.9:GA:server"

HOME_URL="https://www.redhat.com/"

BUG_REPORT_URL="https://bugzilla.redhat.com/"

 

REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"

REDHAT_BUGZILLA_PRODUCT_VERSION=7.9

REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"

REDHAT_SUPPORT_PRODUCT_VERSION="7.9"

Red Hat Enterprise Linux Server release 7.9 (Maipo)

Red Hat Enterprise Linux Server release 7.9 (Maipo)

 

3.2. Remove virbr0

[root@cbdps01 ~]# yum remove libvirt-libs

 

3.3. Install Dependency Packages

3.3.1. Mount the installation image

# mount /dev/sr0 /mnt

mount: /dev/sr0 is write-protected, mounting read-only

3.3.2. Configure the yum repository

[root@cbdps01 ~]# cd /etc/yum.repos.d/

[root@cbdps01 yum.repos.d]# mkdir repo.bak

[root@cbdps01 yum.repos.d]# mv *.repo repo.bak

[root@cbdps01 yum.repos.d]# cat <<EOF>>/etc/yum.repos.d/local.repo

[local]

name=local

baseurl=file:///mnt

gpgcheck=0

enabled=1

EOF

[root@cbdps01 yum.repos.d]# yum makecache

 

3.3.3. Install the dependencies

# yum install -y bc \

binutils \

compat-libcap1 \

compat-libstdc++-33 \

gcc \

gcc-c++ \

elfutils-libelf \

elfutils-libelf-devel \

glibc \

glibc-devel \

ksh \

libaio \

libaio-devel \

libgcc \

libstdc++ \

libstdc++-devel \

libxcb \

libX11 \

libXau \

libXi \

libXtst \

libXrender \

libXrender-devel \

make \

net-tools \

nfs-utils \

smartmontools \

sysstat \

e2fsprogs \

e2fsprogs-libs \

fontconfig-devel \

expect \

unzip \

openssh-clients \

readline* \

tigervnc* \

psmisc \

iotop --skip-broken

 

3.3.4. Upload additional packages

# mkdir /soft

sftp> cd /soft

sftp> lcd F:\package

sftp> put compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm

# cd /soft

# rpm -ivh compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm

 

Verify that the packages are installed:

# rpm -q bc \

binutils \

compat-libcap1 \

compat-libstdc++-33 \

gcc \

gcc-c++ \

elfutils-libelf \

elfutils-libelf-devel \

glibc \

glibc-devel \

ksh \

libaio \

libaio-devel \

libgcc \

libstdc++ \

libstdc++-devel \

libxcb \

libX11 \

libXau \

libXi \

libXtst \

libXrender \

libXrender-devel \

make \

net-tools \

nfs-utils \

smartmontools \

sysstat \

e2fsprogs \

e2fsprogs-libs \

fontconfig-devel \

expect \

unzip \

openssh-clients \

readline \

tigervnc \

psmisc \

iotop | grep "not installed"

 

3.4. Modify the hosts File

Edit it as follows.

[root@cbdps01 ~]# cat <<EOF>>/etc/hosts

#Public IP

192.168.133.170 cbdps01

192.168.133.171 cbdps02

 

#Private IP

192.168.78.170 cbdps01-priv

192.168.78.171 cbdps02-priv

 

#Virtual IP

192.168.133.172 cbdps01-vip

192.168.133.173 cbdps02-vip

 

#Scan IP

192.168.133.174 cbdps-scan

192.168.133.175 cbdps-scan

192.168.133.176 cbdps-scan

EOF

 

Note: at this point the public and private addresses should answer pings; it is normal that the other three kinds of addresses (VIP and SCAN) do not respond yet.
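
A quick reachability loop (a sketch; run from either node) to confirm the expected state — the public and private names answer while the VIP and SCAN names stay silent:

[root@cbdps01 ~]# for h in cbdps01 cbdps02 cbdps01-priv cbdps02-priv cbdps01-vip cbdps02-vip cbdps-scan; do ping -c1 -W1 $h >/dev/null 2>&1 && echo "$h reachable" || echo "$h unreachable"; done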

 

3.5. Firewall Configuration

# systemctl status firewalld.service

# systemctl stop firewalld.service

# systemctl disable firewalld.service

 

3.6. Disable SELinux

Set SELINUX to disabled:

[root@cbdps01 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

[root@cbdps01 ~]# cat /etc/selinux/config

 

# This file controls the state of SELinux on the system.

# SELINUX= can take one of these three values:

#     enforcing - SELinux security policy is enforced.

#     permissive - SELinux prints warnings instead of enforcing.

#     disabled - No SELinux policy is loaded.

SELINUX=disabled

# SELINUXTYPE= can take one of three values:

#     targeted - Targeted processes are protected,

#     minimum - Modification of targeted policy. Only selected processes are protected.

#     mls - Multi Level Security protection.

SELINUXTYPE=targeted

 

Note: a reboot is required for this to take effect.

 

3.7. Time Synchronization

Starting with Oracle 11gR2 RAC, the Cluster Time Synchronization Service (CTSS) keeps the nodes' clocks in sync. Here we stop and disable the NTP and chrony services; Oracle will then start the ctssd process automatically.

[root@cbdps01 ~]# systemctl stop ntpd

[root@cbdps01 ~]# systemctl disable ntpd.service

[root@cbdps01 ~]# mv /etc/ntp.conf /etc/ntp.conf.bak

[root@cbdps01 ~]# systemctl disable chronyd

[root@cbdps01 ~]# systemctl stop chronyd

[root@cbdps01 ~]# mv /etc/chrony.conf /etc/chrony.conf_bak
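
A quick sanity check (a sketch, assuming both services are installed) that neither time service is active or enabled, so that ctssd will run in active mode rather than observer mode:

[root@cbdps01 ~]# systemctl is-active ntpd chronyd     # both should report inactive
[root@cbdps01 ~]# systemctl is-enabled ntpd chronyd    # both should report disabled
[root@cbdps01 ~]# ls /etc/ntp.conf /etc/chrony.conf 2>/dev/null    # should list nothing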

 

3.8. avahi-daemon and NetworkManager Configuration

[root@cbdps01 ~]# yum install -y avahi*

[root@cbdps01 ~]# systemctl stop avahi-daemon.socket

[root@cbdps01 ~]# systemctl stop avahi-daemon.service

[root@cbdps01 ~]# pgrep -f avahi-daemon | awk '{print "kill -9 "$1}' | sh    # kill any leftover avahi-daemon processes

[root@cbdps01 ~]# systemctl disable avahi-daemon.socket

[root@cbdps01 ~]# systemctl disable avahi-daemon.service

[root@cbdps01 ~]# systemctl status avahi-daemon

3.9. Configure NOZEROCONF

[root@cbdps01 ~]# cat <<EOF>>/etc/sysconfig/network

NOZEROCONF=yes

EOF

 

3.10. Kernel Parameters

Append the following to /etc/sysctl.conf:

# cat <<EOF>>/etc/sysctl.conf

fs.aio-max-nr = 4194304

fs.file-max = 6815744

kernel.shmall = 1980560

kernel.shmmax = 6489899008

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default = 262144

net.core.rmem_max = 4194304

net.core.wmem_default = 262144

net.core.wmem_max = 1048586

net.ipv4.ipfrag_high_thresh = 16777216

net.ipv4.ipfrag_low_thresh = 15728640

kernel.randomize_va_space = 0

vm.swappiness = 10

vm.min_free_kbytes = 524288

kernel.panic_on_oops = 1

net.ipv4.conf.ens33.rp_filter = 1

net.ipv4.conf.ens37.rp_filter = 2

EOF

 

# /sbin/sysctl -p
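
A spot check (a sketch) that the values are live after sysctl -p:

# sysctl fs.aio-max-nr fs.file-max kernel.shmmax kernel.sem net.ipv4.ip_local_port_range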

 

3.11. Create Users, Groups, and Directories

Notes:

A. Before creating the users and groups, confirm that uid/gid 1000 is not already in use; remove any conflicting account first, for example:

# userdel -r liujun

B. One core production system did not create the oper group.

groupadd -g 1000 oinstall

groupadd -g 1001 dba

groupadd -g 1002 oper

groupadd -g 1010 asmadmin

groupadd -g 1011 asmdba

groupadd -g 1012 asmoper

useradd -u 1000 -g oinstall -G dba,oper,asmdba  -m -d /home/oracle oracle

useradd -u 1001 -g oinstall -G asmadmin,asmdba,asmoper,dba,oper -m -d /home/grid grid

echo "oracle4U"| passwd --stdin oracle

echo "grid4U"| passwd --stdin grid

 

usermod -a -G oinstall oracle

usermod -a -G oinstall grid

 

mkdir -p /u01/app/oracle

mkdir -p /u01/app/oraInventory

mkdir -p /u01/app/12.2.0.1/grid

mkdir -p /u01/app/grid

mkdir -p /u01/app/oracle/product/12.2.0.1/db_1

chown -R grid:oinstall /u01

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01

 

Note: part of the configuration above was updated (highlighted in red in the original); without the update, the RAC pre-installation check raises the following warning.

a. PRVG-10467 : The default Oracle Inventory group could not be determined.
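
Before continuing, it is worth confirming the memberships and ownership on both nodes (a sketch): grid should belong to oinstall (primary) plus asmadmin, asmdba, asmoper, dba and oper; oracle to oinstall (primary) plus dba, oper and asmdba.

# id grid
# id oracle
# ls -ld /u01 /u01/app/oracle /u01/app/oraInventory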

 

3.12. Resource Limits

Configure limits.conf:

[root@cbdps01 ~]# cat <<EOF>>/etc/security/limits.conf

grid soft core 0

grid hard core 0

grid soft nproc 400000

grid hard nproc 400000

grid soft memlock 711656100

grid hard memlock 711656100

grid soft nofile 400000

grid hard nofile 400000

grid soft stack 10240

grid hard stack 32768

 

oracle soft core 0

oracle hard core 0

oracle soft nproc 400000

oracle hard nproc 400000

oracle soft memlock unlimited

oracle hard memlock unlimited

oracle soft nofile 400000

oracle hard nofile 400000

oracle soft stack  10240

oracle hard stack  32768

EOF

 

Note: part of the configuration above was updated (highlighted in red in the original); without the update, the pre-installation check raises the following warning.

a. "Soft Limit: maximum stack size" could not be fixed on nodes "cbdps02,cbdps01"

 

3.13. Add the pam_limits.so Module

-- Append the following to /etc/pam.d/login:

[root@cbdps01 ~]# cat <<EOF>>/etc/pam.d/login

session required pam_limits.so

session required /lib64/security/pam_limits.so

EOF

 

3.14. Environment Variables

grid user

$ cat <<EOF>>/home/grid/.bash_profile

# Oracle Grid 12c Environment

export TEMP=/tmp

export TMPDIR=\$TEMP

export ORACLE_SID=+ASM1

export ORACLE_BASE=/u01/app/grid

export ORACLE_HOME=/u01/app/12.2.0.1/grid

export LIBPATH=\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32:.

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/jdk/jre/lib:\$ORACLE_HOME/network/lib:\$ORACLE_HOME/rdbms/lib

export CLASSPATH=\$ORACLE_HOME/jre:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib:\$ORACLE_HOME/network/jlib

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$HOME/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:.

 

umask 022

if [ \$USER = "grid" ]; then

if [ \$SHELL = "/bin/ksh" ]; then

ulimit -p 16384

ulimit -n 65536

else

ulimit -u 16384 -n 65536

fi

fi

alias sas='sqlplus / as sysasm'

#stty erase ^H

EOF

 

$ source .bash_profile

 

Note: on node 2 set ORACLE_SID=+ASM2.

 

oracle user

$ cat <<EOF>>/home/oracle/.bash_profile

# Oracle 12c oracle Environment

export TEMP=/tmp

export TMPDIR=\$TEMP

export ORACLE_SID=cbdps1

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=/u01/app/oracle/product/12.2.0.1/db_1

#export NLS_LANG=AMERICAN_AMERICA.AL32UTF8

export LIBPATH=\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/jdk/jre/lib:\$ORACLE_HOME/network/lib:\$ORACLE_HOME/rdbms/lib

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export CLASSPATH=\$ORACLE_HOME/jre:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib:\$ORACLE_HOME/network/jlib

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$HOME/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:.

 

umask 022

if [ \$USER = "oracle" ]; then

if [ \$SHELL = "/bin/ksh" ]; then

ulimit -p 16384

ulimit -n 65536

else

ulimit -u 16384 -n 65536

fi

fi

alias sas='sqlplus / as sysdba'

#stty erase ^H

EOF

 

$ source .bash_profile

 

Note: on node 2 set ORACLE_SID=cbdps2.

 

3.15. Configure CRSCTL for root

Put the GRID binaries on root's PATH so that root can call crsctl and related commands:

[root@cbdps01 /]# cat >> /etc/profile <<EOF

export PATH=/u01/app/12.2.0.1/grid/bin:\$PATH

EOF

 

3.16. Modify /etc/profile

# cd /root

# cat <<EOF>>/etc/profile

    if [ \$USER = "oracle" ] || [ \$USER = "grid" ]; then

            if [ \$SHELL = "/bin/ksh" ]; then

                  ulimit -p 16384

                  ulimit -n 65536

            else

                  ulimit -u 16384 -n 65536

            fi

            umask 022

    fi

EOF

 

# source /etc/profile

 

3.17. Disable Transparent HugePages and NUMA

[root@cbdps01 ~]# sed -i 's/quiet/quiet transparent_hugepage=never numa=off/' /etc/default/grub

[root@cbdps01 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg

 

Values after a reboot:

[root@cbdps01 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

[root@cbdps01 ~]# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-5.4.17-2011.6.2.el7uek.x86_64 root=/dev/mapper/ol-root ro crashkernel=auto rd.lvm.lv=ol/root rd.lvm.lv=ol/swap rhgb quiet transparent_hugepage=never numa=off

 

3.18. Change the MTU of the Loopback Interface

Check the MTU of the lo interface; if it is 65536, change it to 16436.

See: ORA-27301: OS Failure Message: No Buffer Space Available / ORA-27302: failure occurred at: sskgxpsnd2 (Doc ID 2322410.1).

-- Takes effect immediately (not persistent):

[root@cbdps01 ~]# ifconfig lo mtu 16436

-- Persist the change in the config file:

[root@cbdps01 ~]# vi /etc/sysconfig/network-scripts/ifcfg-lo

Change 65536 to 16436.

-- Add the entry if it is not present:

MTU=16436

 

Note: in this environment the MTU was still 65536 after a reboot, i.e. the ifcfg-lo change alone did not persist.
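
Since the ifcfg-lo edit did not survive a reboot here, one workaround (a sketch; the unit name lo-mtu.service is arbitrary) is a systemd oneshot service that re-applies the MTU at boot:

[root@cbdps01 ~]# cat <<'EOF' >/etc/systemd/system/lo-mtu.service
[Unit]
Description=Set loopback MTU to 16436 (Doc ID 2322410.1 workaround)
After=network.target

[Service]
Type=oneshot
ExecStart=/usr/sbin/ip link set dev lo mtu 16436

[Install]
WantedBy=multi-user.target
EOF
[root@cbdps01 ~]# systemctl daemon-reload
[root@cbdps01 ~]# systemctl enable --now lo-mtu.service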

 

4. Storage Configuration

4.1. Add Shared Storage

Shared storage can be provided by third-party software, simulated with VMware Workstation, or configured through an iSCSI network storage service. This walkthrough uses VMware Workstation to simulate shared storage.

Add the shared disks

Add the shared disks to both nodes in turn, then append the following to the .vmx file of each virtual machine:

disk.locking = "FALSE"

disk.EnableUUID = "TRUE"

4.2. Disk Partitioning

Note: partition the newly added disks on node 1 only.

[root@cbdps01 ~]# lsblk -p

NAME                      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

/dev/sda                    8:0    0  100G  0 disk

├─/dev/sda1                 8:1    0    1G  0 part /boot

└─/dev/sda2                 8:2    0   99G  0 part

  ├─/dev/mapper/rhel-root 253:0    0   91G  0 lvm  /

  └─/dev/mapper/rhel-swap 253:1    0    8G  0 lvm  [SWAP]

/dev/sdb                    8:16   0   40G  0 disk

/dev/sdc                    8:32   0   40G  0 disk

/dev/sr0                   11:0    1  4.2G  0 rom  /mnt

 

Note: as shown above, sdb and sdc are the newly added disks.

 

[root@cbdps01 ~]# fdisk /dev/sdb

….

 

Note: partition sdb and sdc in turn on node 1; a scripted alternative is sketched below.
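
The interactive fdisk dialog is elided above; a non-interactive equivalent (a sketch — it writes a new label and a single primary partition spanning each disk, destroying any data on them) would be:

[root@cbdps01 ~]# for d in /dev/sdb /dev/sdc; do parted -s "$d" mklabel msdos mkpart primary 1MiB 100%; done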

 

[root@cbdps01 ~]# partprobe

[root@cbdps02 ~]# partprobe

 

Check the partitioning result:

[root@cbdps01 ~]# lsblk -p

NAME                      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

/dev/sda                    8:0    0  100G  0 disk

├─/dev/sda1                 8:1    0    1G  0 part /boot

└─/dev/sda2                 8:2    0   99G  0 part

  ├─/dev/mapper/rhel-root 253:0    0   91G  0 lvm  /

  └─/dev/mapper/rhel-swap 253:1    0    8G  0 lvm  [SWAP]

/dev/sdb                    8:16   0   40G  0 disk

└─/dev/sdb1                 8:17   0   40G  0 part

/dev/sdc                    8:32   0   40G  0 disk

└─/dev/sdc1                 8:33   0   40G  0 part

/dev/sr0                   11:0    1  4.2G  0 rom  /mnt

 

5. Multipath

Note: execute steps 5.1 through 5.4 on both nodes.

5.1. Install multipath

[root@cbdps01 ~]# yum -y install device-mapper*

[root@cbdps01 ~]# mpathconf --enable --with_multipathd y

 

5.2. Obtain the scsi_id of the Shared Disks

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdb

36000c2995ef56878da7aa84032d71478

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdc

36000c2993e578695b3ab417ea58d91be

 

5.3. Configure multipath

The wwid values are the scsi_ids obtained above; the alias names can be chosen freely.

[root@cbdps01 ~]# cat <<EOF>/etc/multipath.conf

defaults {

    user_friendly_names yes

}

 

blacklist {

  devnode "^sda"

}

 

multipaths {

  multipath {

  wwid "36000c2995ef56878da7aa84032d71478"

  alias ocr01

  }

  multipath {

  wwid "36000c2993e578695b3ab417ea58d91be"

  alias data01

  }

}

EOF

 

5.4. Activate the Multipath Devices

[root@cbdps01 ~]# multipath -F

[root@cbdps01 ~]# multipath -v2

[root@cbdps01 ~]# multipath -ll

data01 (36000c2993e578695b3ab417ea58d91be) dm-4 VMware, ,VMware Virtual S

size=40G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 0:0:2:0 sdc 8:32 active ready running

ocr01 (36000c2995ef56878da7aa84032d71478) dm-2 VMware, ,VMware Virtual S

size=40G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 0:0:1:0 sdb 8:16 active ready running

 

[root@cbdps01 ~]# lsblk -p

NAME                      MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT

/dev/sda                    8:0    0  100G  0 disk 

├─/dev/sda1                 8:1    0    1G  0 part  /boot

└─/dev/sda2                 8:2    0   99G  0 part 

  ├─/dev/mapper/rhel-root 253:0    0   91G  0 lvm   /

  └─/dev/mapper/rhel-swap 253:1    0    8G  0 lvm   [SWAP]

/dev/sdb                    8:16   0   40G  0 disk 

└─/dev/mapper/ocr01       253:2    0   40G  0 mpath

  └─/dev/mapper/ocr01p1   253:3    0   40G  0 part 

/dev/sdc                    8:32   0   40G  0 disk 

└─/dev/mapper/data01      253:4    0   40G  0 mpath

  └─/dev/mapper/data01p1  253:5    0   40G  0 part 

/dev/sr0                   11:0    1  4.2G  0 rom   /mnt

Note: the warning raised when root.sh runs during the GI installation is caused by an ACFS bug (see section 13), not by the multipath configuration.

 

6. ASMLib

Note: execute steps 6.1 through 6.5 on both nodes, except 6.4, which runs on node 1 only.

6.1. Upload the Files

sftp> cd /root

sftp> lcd F:\package\asmlib\el7

sftp> lls

kmod-oracleasm-2.0.8-21.0.1.el7.x86_64.rpm  oracleasm-support-2.1.11-2.el7.x86_64.rpm

oracleasmlib-2.0.12-1.el7.x86_64.rpm

sftp> put *.rpm

 

6.2. Install ASMLib

[root@cbdps01 ~]# rpm -ivh oracleasm-support-2.1.11-2.el7.x86_64.rpm

[root@cbdps01 ~]# rpm -ivh kmod-oracleasm-2.0.8-21.0.1.el7.x86_64.rpm

[root@cbdps01 ~]# rpm -ivh oracleasmlib-2.0.12-1.el7.x86_64.rpm

 

6.3. Configure ASMLib

[root@cbdps01 ~]# /usr/sbin/oracleasm configure -i

Configuring the Oracle ASM library driver.

 

This will configure the on-boot properties of the Oracle ASM library

driver.  The following questions will determine whether the driver is

loaded on boot and what permissions it will have.  The current values

will be shown in brackets ('[]').  Hitting <ENTER> without typing an

answer will keep that current value.  Ctrl-C will abort.

 

Default user to own the driver interface []: grid

Default group to own the driver interface []: asmadmin

Start Oracle ASM library driver on boot (y/n) [n]: y

Scan for Oracle ASM disks on boot (y/n) [y]: y

Writing Oracle ASM library driver configuration: done

 

[root@cbdps01 ~]# /usr/sbin/oracleasm init

Creating /dev/oracleasm mount point: /dev/oracleasm

Loading module "oracleasm": oracleasm

Configuring "oracleasm" to use device physical block size

Mounting ASMlib driver filesystem: /dev/oracleasm

 

6.4. Create the ASM Disks

Note: perform this step on node 1 only.

[root@cbdps01 ~]# oracleasm createdisk ocr01 /dev/mapper/ocr01p1

Writing disk header: done

Instantiating disk: done

[root@cbdps01 ~]# oracleasm createdisk data01 /dev/mapper/data01p1

Writing disk header: done

Instantiating disk: done

 

6.5. Verify the Disks

[root@cbdps01 ~]# oracleasm scandisks

Reloading disk partitions: done

Cleaning any stale ASM disks...

Scanning system for ASM disks...

[root@cbdps01 ~]# oracleasm listdisks

DATA01

OCR01

[root@cbdps01 ~]# oracleasm querydisk OCR01

Disk "OCR01" is a valid ASM disk

[root@cbdps01 ~]# oracleasm querydisk DATA01

Disk "DATA01" is a valid ASM disk

[root@cbdps01 ~]# ls -ltr /dev/oracleasm/disks

total 0

brw-rw----. 1 grid asmadmin 253, 3 Feb 12 19:35 OCR01

brw-rw----. 1 grid asmadmin 253, 5 Feb 12 19:35 DATA01

 

[root@cbdps02 ~]# oracleasm scandisks

Reloading disk partitions: done

Cleaning any stale ASM disks...

Scanning system for ASM disks...

Instantiating disk "OCR01"

Instantiating disk "DATA01"

[root@cbdps02 ~]# oracleasm listdisks

DATA01

OCR01

 

7. Upload the Installation Media

Note: perform this step on node 1 only.

Create separate directories for the installation files:

# mkdir -p /u01/setup/{db,grid}

 

Upload the files:

sftp> lcd F:\installmedium\12c

sftp> cd /u01/setup/grid

sftp> put linuxx64_12201_grid_home.zip

sftp> cd /u01/setup/db

sftp> put linuxx64_12201_database.zip

 

Note: the linuxx64_12201_grid_home.zip package itself can be problematic and cause root.sh to fail repeatedly.

 

8. Upload the RU Patches

Note: perform this step on both nodes.

[root@cbdps01 ~]# mkdir -p /u01/setup/{RU,OJVM,oneoff_patch,OPatch}

sftp> cd /u01/setup/OPatch

sftp> lcd F:\installmedium\12c\OPatch\12.2.0.1.41

sftp> put p6880880_122010_Linux-x86-64.zip

sftp> cd /u01/setup/RU

sftp> lcd F:\installmedium\12c\RU

sftp> put p33583921_122010_Linux-x86-64.zip

sftp> cd /u01/setup/OJVM

sftp> put p33561275_122010_Linux-x86-64.zip

sftp> cd /u01/setup/oneoff_patch/

sftp> put p30666479_12201220118DBJAN2022RU_Linux-x86-64.zip

 

9. Extract the Installation Media

Note: extract the installation media on node 1 only. Unlike earlier releases, the 12c R2 grid zip is itself part of the software and must be extracted directly into the Grid Home.

 

The grid package:

# chown -R grid:oinstall /u01/setup/grid

# su - grid -c "unzip -q /u01/setup/grid/linuxx64_12201_grid_home.zip -d /u01/app/12.2.0.1/grid/"

 

The oracle package:

# chown -R oracle:oinstall /u01/setup/db

# su - oracle -c "unzip -q /u01/setup/db/linuxx64_12201_database.zip -d /u01/setup/db"

 

Extract the OPatch update

Note: only the grid user's OPatch on node 1 is upgraded here.

# chown -R grid:oinstall /u01/setup/OPatch

# su - grid -c "unzip -q -o /u01/setup/OPatch/p6880880_122010_Linux-x86-64.zip -d /u01/app/12.2.0.1/grid"

 

The RU patch:

# chown -R grid:oinstall /u01/setup/RU

# su - grid -c "unzip -q /u01/setup/RU/p33583921_122010_Linux-x86-64.zip -d /u01/setup/RU"

Check the OPatch version:

[grid@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

Special note: once the GI installation completes, the grid user's OPatch on node 2 is upgraded to 12.2.0.1.41 automatically, as shown below.

[grid@cbdps02 ~]$ opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

10. Install cvuqdisk

Install the cvuqdisk package:

[root@cbdps01 ~]# cd /u01/app/12.2.0.1/grid/cv/rpm

[root@cbdps01 rpm]# CVUQDISK_GRP=oinstall; export CVUQDISK_GRP

[root@cbdps01 rpm]# rpm -ivh cvuqdisk-1.0.10-1.rpm

Preparing...                ########################################### [100%]

   1:cvuqdisk               ########################################### [100%]

[root@cbdps01 rpm]# scp cvuqdisk-1.0.10-1.rpm cbdps02:/tmp

[root@cbdps02 ~]# CVUQDISK_GRP=oinstall; export CVUQDISK_GRP

[root@cbdps02 ~]# rpm -ivh /tmp/cvuqdisk-1.0.10-1.rpm

 

11. Configure SSH User Equivalence

Note: run on node 1 only.

[root@cbdps01 ~]# su - grid

[grid@cbdps01 ~]$ cd $ORACLE_HOME/oui/prov/resources/scripts

[grid@cbdps01 scripts]$ ./sshUserSetup.sh -user grid -hosts "cbdps01 cbdps02" -advanced -noPromptPassphrase

[grid@cbdps01 scripts]$ ./sshUserSetup.sh -user oracle -hosts "cbdps01 cbdps02" -advanced -noPromptPassphrase

 

Note: the equivalence script can be run either as the grid user (as here) or as root.

 

Test the equivalence

Note: test from both nodes.

grid user

[grid@cbdps01 ~]$ ssh cbdps01 date

Mon Jan 27 16:00:23 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02 date

Mon Jan 27 16:00:28 CST 2025

[grid@cbdps01 ~]$ ssh cbdps01-priv date

The authenticity of host 'cbdps01-priv (192.168.78.170)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps01-priv,192.168.78.170' (ECDSA) to the list of known hosts.

Mon Jan 27 16:00:38 CST 2025

[grid@cbdps01 ~]$ ssh cbdps01-priv date

Mon Jan 27 16:00:41 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02-priv date

The authenticity of host 'cbdps02-priv (192.168.78.171)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps02-priv,192.168.78.171' (ECDSA) to the list of known hosts.

Mon Jan 27 16:00:55 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02-priv date

Mon Jan 27 16:00:57 CST 2025

 

oracle user

[oracle@cbdps01 ~]$ ssh cbdps01 date

Mon Jan 27 16:01:38 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02 date

Mon Jan 27 16:01:43 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps01-priv date

The authenticity of host 'cbdps01-priv (192.168.78.170)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps01-priv,192.168.78.170' (ECDSA) to the list of known hosts.

Mon Jan 27 16:01:50 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps01-priv date

Mon Jan 27 16:01:52 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02-priv date

The authenticity of host 'cbdps02-priv (192.168.78.171)' can't be established.

ECDSA key fingerprint is SHA256:UWtTT+EYQyWcbH30Tcq32+Kd2v+dyPbgPr/7xIcRazk.

Are you sure you want to continue connecting (yes/no/[fingerprint])? yes

Warning: Permanently added 'cbdps02-priv,192.168.78.171' (ECDSA) to the list of known hosts.

Mon Jan 27 16:01:59 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02-priv date

Mon Jan 27 16:02:00 CST 2025

 

Note: the goal is that none of the connections prompts for "yes" any more; see the check below.
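
BatchMode makes ssh fail instead of prompting, so the loop below (a sketch; run it as both grid and oracle on both nodes) catches any host that would still ask for a confirmation or password:

$ for h in cbdps01 cbdps02 cbdps01-priv cbdps02-priv; do ssh -o BatchMode=yes $h date; done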

 

12. Check SELinux, NUMA, the I/O Scheduler, and Transparent HugePages

Note: recheck these settings after rebooting.

[root@cbdps02 ~]# getenforce

Disabled

[root@cbdps01 ~]# getenforce

Disabled

[root@cbdps01 ~]# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-5.4.17-2011.6.2.el7uek.x86_64 root=/dev/mapper/ol-root ro crashkernel=auto rd.lvm.lv=ol/root rd.lvm.lv=ol/swap rhgb quiet transparent_hugepage=never numa=off

[root@cbdps02 ~]# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-5.4.17-2011.6.2.el7uek.x86_64 root=/dev/mapper/ol-root ro crashkernel=auto rd.lvm.lv=ol/root rd.lvm.lv=ol/swap rhgb quiet transparent_hugepage=never numa=off

 

[root@cbdps01 ~]# cat /sys/block/sdb/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdb/queue/scheduler

noop [deadline] cfq

[root@cbdps01 ~]# cat /sys/block/sdc/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdc/queue/scheduler

noop [deadline] cfq
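
deadline is already the default elevator for sd devices on RHEL 7; should a disk report a different scheduler, a udev rule can pin it persistently (a sketch; the rule file name is arbitrary):

[root@cbdps01 ~]# cat <<'EOF' >/etc/udev/rules.d/60-oracle-scheduler.rules
ACTION=="add|change", KERNEL=="sd[bc]", ATTR{queue/scheduler}="deadline"
EOF
[root@cbdps01 ~]# udevadm control --reload-rules
[root@cbdps01 ~]# udevadm trigger --type=devices --action=change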

 

[root@cbdps01 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

[root@cbdps02 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

 

Check the MTU of the lo interface

If it is 65536, change it to 16436:

[root@cbdps02 ~]# ifconfig | grep -i mtu | grep -i lo

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 16436

13. Install GI with the Graphical Installer

Note: a reboot is recommended before installing. Run the graphical GI installation on node 1 only.

[grid@cbdps01 ~]$ cd $ORACLE_HOME

[grid@cbdps01 grid]$ export DISPLAY=192.168.133.1:0.0

[grid@cbdps01 grid]$ ./gridSetup.sh -applyOneOffs /u01/setup/RU/33583921/33116894/

 

Note: although the 33116894 patch is applied only on node 1 here, node 2 receives the patch as well once the GI installation succeeds.

 

Preparing the home to patch...

Applying the patch /u01/setup/RU/33583921/33116894/...

Successfully applied the patch.

The log can be found at: /tmp/GridSetupActions2025-02-12_07-58-43PM/installerPatchActions_2025-02-12_07-58-43PM.log

Launching Oracle Grid Infrastructure Setup Wizard...

Choose a cluster installation.

After adding node 2's public and virtual hostnames, click "SSH connectivity" and enter the grid user's password (the password must be identical on both nodes when the users are created). Run Setup first and then Test to establish the equivalence.

Make sure each NIC is mapped to its proper subnet. In 12c the interconnect subnet defaults to "ASM & Private", which also carries ASM instance traffic.

During installation, create the OCR disk group and select its disks and redundancy level (this walkthrough has only the single OCR01 disk available, so External redundancy is used).

Note: the "High" redundancy type requires at least five disks.

Note: you can tick "Automatically run configuration scripts" here, in which case the root.sh scripts below need not be run manually.

Run on node 1:

# /u01/app/oraInventory/orainstRoot.sh

Run on node 2:

# /u01/app/oraInventory/orainstRoot.sh

 

Run on node 1:

# /u01/app/12.2.0.1/grid/root.sh

Run on node 2:

# /u01/app/12.2.0.1/grid/root.sh

 

Note: after the root.sh script has run, the permissions on /u01 change as follows.

drwxr-xr-x.   4 root oinstall  4096 Feb  5 11:00 u01

 

Attributes before root.sh was executed:

drwxrwxr-x.   3 grid oinstall  4096 Feb  5 10:28 u01

 

Note: checking a four-node core production system confirmed that /u01 is indeed owned by root:oinstall there, so the ownership change really is made by root.sh. This is worth stressing because permission problems on this directory later caused repeated patching failures, which at first looked like a pure permissions issue.

Running root.sh kept producing the warning below; it was confirmed to be caused by an ACFS bug, fixed by patch 33116894.

The command '/u01/app/12.2.0.1/grid/perl/bin/perl -I/u01/app/12.2.0.1/grid/perl/lib -I/u01/app/12.2.0.1/grid/crs/install /u01/app/12.2.0.1/grid/crs/install/rootcrs.pl ' execution failed
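
If root.sh has already failed on a node, it usually cannot simply be re-run; a common recovery (a sketch — confirm the exact procedure on MOS for your patch level) is to deconfigure Clusterware on the failed node as root and then run root.sh again:

# /u01/app/12.2.0.1/grid/crs/install/rootcrs.sh -deconfig -force
# /u01/app/12.2.0.1/grid/root.sh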

 

Execution log

Node 1

[root@cbdps01 ~]# /u01/app/oraInventory/orainstRoot.sh

Changing permissions of /u01/app/oraInventory.

Adding read,write permissions for group.

Removing read,write,execute permissions for world.

 

Changing groupname of /u01/app/oraInventory to oinstall.

The execution of the script is complete.

[root@cbdps01 ~]# /u01/app/12.2.0.1/grid/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= grid

    ORACLE_HOME=  /u01/app/12.2.0.1/grid

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

   Copying dbhome to /usr/local/bin ...

   Copying oraenv to /usr/local/bin ...

   Copying coraenv to /usr/local/bin ...

 

 

Creating /etc/oratab file...

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Relinking oracle with rac_on option

Using configuration parameter file: /u01/app/12.2.0.1/grid/crs/install/crsconfig_params

The log of current session can be found at:

  /u01/app/grid/crsdata/cbdps01/crsconfig/rootcrs_cbdps01_2025-02-25_11-33-58PM.log

2025/02/25 23:34:00 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.

2025/02/25 23:34:00 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.

2025/02/25 23:34:25 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.

2025/02/25 23:34:25 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.

2025/02/25 23:34:29 CLSRSC-363: User ignored prerequisites during installation

2025/02/25 23:34:29 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.

2025/02/25 23:34:31 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.

2025/02/25 23:34:31 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.

2025/02/25 23:34:37 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.

2025/02/25 23:34:38 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.

2025/02/25 23:34:38 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.

2025/02/25 23:34:57 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.

2025/02/25 23:35:04 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.

2025/02/25 23:35:04 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.

2025/02/25 23:35:09 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.

2025/02/25 23:35:25 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'

2025/02/25 23:36:13 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.

2025/02/25 23:36:18 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

2025/02/25 23:37:15 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.

2025/02/25 23:37:20 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps01'

CRS-2676: Start of 'ora.evmd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps01'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps01'

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps01'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps01'

CRS-2676: Start of 'ora.diskmon' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps01' succeeded

 

Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-250225PM113756.log for details.

 

 

2025/02/25 23:38:29 CLSRSC-482: Running command: '/u01/app/12.2.0.1/grid/bin/ocrconfig -upgrade grid oinstall'

CRS-2672: Attempting to start 'ora.crf' on 'cbdps01'

CRS-2672: Attempting to start 'ora.storage' on 'cbdps01'

CRS-2676: Start of 'ora.storage' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.crf' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps01'

CRS-2676: Start of 'ora.crsd' on 'cbdps01' succeeded

CRS-4256: Updating the profile

Successful addition of voting disk 3c83a144650a4fabbf5d860e6538ddfa.

Successfully replaced voting disk group with +OCR.

CRS-4256: Updating the profile

CRS-4266: Voting file(s) successfully replaced

##  STATE    File Universal Id                File Name Disk group

--  -----    -----------------                --------- ---------

 1. ONLINE   3c83a144650a4fabbf5d860e6538ddfa (ORCL:OCR01) [OCR]

Located 1 voting disk(s).

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2673: Attempting to stop 'ora.crsd' on 'cbdps01'

CRS-2677: Stop of 'ora.crsd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.storage' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.crf' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.gpnpd' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.mdnsd' on 'cbdps01'

CRS-2677: Stop of 'ora.drivers.acfs' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.crf' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.storage' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.asm' on 'cbdps01'

CRS-2677: Stop of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.asm' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'cbdps01'

CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.ctssd' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.evmd' on 'cbdps01'

CRS-2677: Stop of 'ora.evmd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.ctssd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.cssd' on 'cbdps01'

CRS-2677: Stop of 'ora.cssd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.gipcd' on 'cbdps01'

CRS-2677: Stop of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

2025/02/25 23:39:36 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.

CRS-4123: Starting Oracle High Availability Services-managed resources

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps01'

CRS-2676: Start of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.evmd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps01'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps01'

CRS-2676: Start of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps01'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps01'

CRS-2676: Start of 'ora.diskmon' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'cbdps01'

CRS-2672: Attempting to start 'ora.ctssd' on 'cbdps01'

CRS-2676: Start of 'ora.ctssd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps01'

CRS-2676: Start of 'ora.asm' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.storage' on 'cbdps01'

CRS-2676: Start of 'ora.storage' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crf' on 'cbdps01'

CRS-2676: Start of 'ora.crf' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps01'

CRS-2676: Start of 'ora.crsd' on 'cbdps01' succeeded

CRS-6023: Starting Oracle Cluster Ready Services-managed resources

CRS-6017: Processing resource auto-start for servers: cbdps01

CRS-6016: Resource auto-start has completed for server cbdps01

CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources

CRS-4123: Oracle High Availability Services has been started.

2025/02/25 23:41:07 CLSRSC-343: Successfully started Oracle Clusterware stack

2025/02/25 23:41:07 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps01'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps01'

CRS-2676: Start of 'ora.asm' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.OCR.dg' on 'cbdps01'

CRS-2676: Start of 'ora.OCR.dg' on 'cbdps01' succeeded

2025/02/25 23:42:44 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.

2025/02/25 23:43:08 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

 

Node 2

[root@cbdps02 ~]# /u01/app/12.2.0.1/grid/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= grid

    ORACLE_HOME=  /u01/app/12.2.0.1/grid

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

   Copying dbhome to /usr/local/bin ...

   Copying oraenv to /usr/local/bin ...

   Copying coraenv to /usr/local/bin ...

 

 

Creating /etc/oratab file...

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Relinking oracle with rac_on option

Using configuration parameter file: /u01/app/12.2.0.1/grid/crs/install/crsconfig_params

The log of current session can be found at:

  /u01/app/grid/crsdata/cbdps02/crsconfig/rootcrs_cbdps02_2025-02-25_11-43-35PM.log

2025/02/25 23:43:37 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.

2025/02/25 23:43:37 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.

2025/02/25 23:44:03 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.

2025/02/25 23:44:03 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.

2025/02/25 23:44:05 CLSRSC-363: User ignored prerequisites during installation

2025/02/25 23:44:05 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.

2025/02/25 23:44:06 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.

2025/02/25 23:44:06 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.

2025/02/25 23:44:10 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.

2025/02/25 23:44:10 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.

2025/02/25 23:44:10 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.

2025/02/25 23:44:13 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.

2025/02/25 23:44:15 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.

2025/02/25 23:44:15 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.

2025/02/25 23:44:17 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.

2025/02/25 23:44:32 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'

2025/02/25 23:45:19 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.

2025/02/25 23:45:20 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

2025/02/25 23:46:25 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.

2025/02/25 23:46:26 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'cbdps02'

CRS-2677: Stop of 'ora.drivers.acfs' on 'cbdps02' succeeded

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

2025/02/25 23:46:45 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.

CRS-4123: Starting Oracle High Availability Services-managed resources

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps02'

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps02'

CRS-2676: Start of 'ora.mdnsd' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.evmd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps02'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps02'

CRS-2676: Start of 'ora.gipcd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps02'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps02'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps02'

CRS-2676: Start of 'ora.diskmon' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'cbdps02'

CRS-2672: Attempting to start 'ora.ctssd' on 'cbdps02'

CRS-2676: Start of 'ora.ctssd' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps02'

CRS-2676: Start of 'ora.asm' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.storage' on 'cbdps02'

CRS-2676: Start of 'ora.storage' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.crf' on 'cbdps02'

CRS-2676: Start of 'ora.crf' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps02'

CRS-2676: Start of 'ora.crsd' on 'cbdps02' succeeded

CRS-6017: Processing resource auto-start for servers: cbdps02

CRS-2673: Attempting to stop 'ora.LISTENER_SCAN1.lsnr' on 'cbdps01'

CRS-2672: Attempting to start 'ora.net1.network' on 'cbdps02'

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps02'

CRS-2677: Stop of 'ora.LISTENER_SCAN1.lsnr' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.net1.network' on 'cbdps02' succeeded

CRS-2673: Attempting to stop 'ora.scan1.vip' on 'cbdps01'

CRS-2672: Attempting to start 'ora.ons' on 'cbdps02'

CRS-2677: Stop of 'ora.scan1.vip' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.scan1.vip' on 'cbdps02'

CRS-2676: Start of 'ora.scan1.vip' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.LISTENER_SCAN1.lsnr' on 'cbdps02'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps02'

CRS-2676: Start of 'ora.ons' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.LISTENER_SCAN1.lsnr' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.asm' on 'cbdps02' succeeded

CRS-6016: Resource auto-start has completed for server cbdps02

CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources

CRS-4123: Oracle High Availability Services has been started.

2025/02/25 23:49:09 CLSRSC-343: Successfully started Oracle Clusterware stack

2025/02/25 23:49:09 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

2025/02/25 23:49:21 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.

2025/02/25 23:49:30 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

[grid@cbdps01 grid]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER_SCAN2.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.LISTENER_SCAN3.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.221.11 192.1

                                                             68.78.170,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.scan2.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan3.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

 

14. Test the Cluster Failover

14.1. Test Node 2

-- Reboot node 2 and check the status from node 1.

[grid@cbdps01 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.253.194 192.

                                                             168.78.170,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  OFFLINE                               STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  INTERMEDIATE cbdps01                  FAILED OVER,STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

14.2、Testing node 1

-- Reboot node 1, then check the cluster status from node 2.

[grid@cbdps02 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  OFFLINE                               STABLE

ora.MGMTLSNR

      1        ONLINE  OFFLINE      cbdps02                  169.254.253.194 192.

                                                             168.78.170,STARTING

ora.asm

      1        ONLINE  OFFLINE                               STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  OFFLINE                               STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  OFFLINE                               STABLE

ora.mgmtdb

      1        ONLINE  OFFLINE                               Instance Shutdown,ST

                                                             ABLE

ora.qosmserver

      1        ONLINE  OFFLINE                               STABLE

ora.scan1.vip

      1        ONLINE  OFFLINE                               STABLE

--------------------------------------------------------------------------------
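During these reboot tests it helps to watch resources move in near-real time rather than running crsctl once. A minimal watch loop, run as grid on the surviving node (the 10-second interval is arbitrary):

while true; do
    date
    crsctl stat res -t
    sleep 10
done

Once the rebooted node rejoins the cluster, the failed-over VIP migrates back to it and the OFFLINE entries return to ONLINE.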

 

15、Installing the DB software

[oracle@cbdps01 ~]$ cd /u01/setup/db/database/

[oracle@cbdps01 database]$ export DISPLAY=192.168.133.1:0.0

[oracle@cbdps01 database]$ ./runInstaller

Starting Oracle Universal Installer...

 

Checking Temp space: must be greater than 500 MB.   Actual 64628 MB    Passed

Checking swap space: must be greater than 150 MB.   Actual 8191 MB    Passed

Checking monitor: must be configured to display at least 256 colors.    Actual 16777216    Passed

Preparing to launch Oracle Universal Installer from /tmp/OraInstall2025-02-03_09-38-14AM. Please wait ...[oracle@cbdps01 database]$ 

Click "SSH connectivity", enter the oracle password, then click "Setup" and "Test"; a confirmation dialog appears after each step.

Note: the following check items may be ignored during installation (if any other check fails, it must not be ignored).

resolv.conf Integrity

Single Client Access Name(SCAN)
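For unattended installations, runInstaller also supports silent mode driven by a response file; a minimal sketch (the response-file path points at the template shipped with the 12.2 media, which would first need to be edited):

[oracle@cbdps01 database]$ ./runInstaller -silent -waitforcompletion \
      -responseFile /u01/setup/db/database/response/db_install.rsp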

[root@cbdps01 ~]# /u01/app/oracle/product/12.2.0.1/db_1/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= oracle

    ORACLE_HOME=  /u01/app/oracle/product/12.2.0.1/db_1

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

The contents of "dbhome" have not changed. No need to overwrite.

The contents of "oraenv" have not changed. No need to overwrite.

The contents of "coraenv" have not changed. No need to overwrite.

 

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

 

Note: run the root.sh script as root on both nodes.

Note: at this point the DB software installation is complete.

 

16、Configuring ASM disks

Note: run on node 1 only. Create the DATA disk group with external redundancy (if external is chosen in production, the underlying storage must be protected by RAID); additional disks can be added later as needed.

[grid@cbdps01 ~]$ export DISPLAY=192.168.133.1:0.0

[grid@cbdps01 ~]$ asmca

[grid@cbdps01 ~]$ asmcmd

ASMCMD> lsdg

State    Type    Rebal  Sector  Logical_Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name

MOUNTED  EXTERN  N         512             512   4096  4194304     40952    40820                0           40820              0             N  DATA/

MOUNTED  EXTERN  N         512             512   4096  4194304     40952     6932                0            6932              0             Y  OCR/

 

Note: as shown above, the new DATA disk group has been created and successfully mounted.
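Alternatively, the disk group can be created from SQL*Plus on the ASM instance instead of the asmca GUI; a sketch assuming the ASMLIB disk label is DATA1 (substitute your own labels):

[grid@cbdps01 ~]$ sqlplus / as sysasm
SQL> CREATE DISKGROUP DATA EXTERNAL REDUNDANCY
  2  DISK 'ORCL:DATA1'
  3  ATTRIBUTE 'compatible.asm'='12.2', 'au_size'='4M';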

 

17、Patch upgrades

17.1、Patch status before the upgrade

[oracle@cbdps01 ~]$ opatch lspatches

There are no Interim patches installed in this Oracle Home "/u01/app/oracle/product/12.2.0.1/db_1".

 

OPatch succeeded.

 

[grid@cbdps01 ~]$ opatch lspatches

33116894;ACFS JUL 2021 RELEASE UPDATE 12.2.0.1.210720 (33116894)

 

OPatch succeeded.

 

[oracle@cbdps02 ~]$ opatch lspatches

There are no Interim patches installed in this Oracle Home "/u01/app/oracle/product/12.2.0.1/db_1".

 

OPatch succeeded.

 

[grid@cbdps02 ~]$ opatch lspatches

33116894;ACFS JUL 2021 RELEASE UPDATE 12.2.0.1.210720 (33116894)

 

OPatch succeeded.

 

17.2、Applying the patches

17.2.1、Upgrading OPatch

Note: OPatch must be upgraded on both node 1 and node 2, and its version must be at least 12.2.0.1.28; version 12.2.0.1.41 is installed here.

Unzip p6880880_122010_Linux-x86-64.zip and replace ORACLE_HOME/OPatch and GRID_HOME/OPatch on both nodes.

Upgrading the grid home OPatch
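The grid-home replacement mirrors the oracle-home steps shown below; a sketch, run as grid (ORACLE_HOME here resolves to /u01/app/12.2.0.1/grid):

[grid@cbdps01 ~]$ mv $ORACLE_HOME/OPatch $ORACLE_HOME/OPatch.bak
[grid@cbdps01 ~]$ unzip -q -o /u01/setup/OPatch/p6880880_122010_Linux-x86-64.zip -d $ORACLE_HOME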

[grid@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

[oracle@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.6

 

OPatch succeeded.

Upgrading the oracle home OPatch

[grid@cbdps01 ~]$ su - oracle

[oracle@cbdps01 ~]$ mv $ORACLE_HOME/OPatch $ORACLE_HOME/OPatch.bak

[oracle@cbdps01 ~]$ cd /u01/setup/OPatch/

[oracle@cbdps01 OPatch]$ unzip -q -o /u01/setup/OPatch/p6880880_122010_Linux-x86-64.zip -d $ORACLE_HOME

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

17.2.2、Applying the RU

Apply the RU (33583921) to both GI and DB as root. Note: if a database instance has already been created, shut it down first.
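If an instance exists at this point, it can be stopped cluster-wide with srvctl before patching; a sketch (cbdps is the database name created later in section 18; substitute your own):

[oracle@cbdps01 ~]$ srvctl stop database -db cbdps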

17.2.2.1、Applying the GI RU

Note: both nodes must be patched.

Note: because the ACFS patch (33116894) was applied separately during the GI installation, run the applicability analysis first.

 

[root@cbdps01 ~]# cd /u01/setup/RU

[root@cbdps01 RU]# /u01/app/12.2.0.1/grid/OPatch/opatchauto apply /u01/setup/RU/33583921 -analyze

 

OPatchauto session is initiated at Wed Feb 12 23:08:26 2025

 

System initialization log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchautodb/systemconfig2025-02-12_11-08-28PM.log.

 

Session log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/opatchauto2025-02-12_11-08-50PM.log

The id for this session is 9BM1

 

Executing OPatch prereq operations to verify patch applicability on home /u01/app/12.2.0.1/grid

Patch applicability verified successfully on home /u01/app/12.2.0.1/grid

 

 

Executing patch validation checks on home /u01/app/12.2.0.1/grid

Patch validation checks successfully completed on home /u01/app/12.2.0.1/grid

 

OPatchAuto successful.

 

--------------------------------Summary--------------------------------

 

Analysis for applying patches has completed successfully:

 

Host:cbdps01

CRS Home:/u01/app/12.2.0.1/grid

Version:12.2.0.1.0

 

 

==Following patches were SKIPPED:

 

Patch: /u01/setup/RU/33583921/33116894

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-12_23-09-07PM_1.log

Reason: /u01/setup/RU/33583921/33116894 is not required to be applied to oracle home /u01/app/12.2.0.1/grid

 

 

==Following patches were SUCCESSFULLY analyzed to be applied:

 

Patch: /u01/setup/RU/33583921/33678030

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-12_23-09-07PM_1.log

 

Patch: /u01/setup/RU/33583921/26839277

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-12_23-09-07PM_1.log

 

Patch: /u01/setup/RU/33583921/33610989

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-12_23-09-07PM_1.log

 

Patch: /u01/setup/RU/33583921/33587128

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-12_23-09-07PM_1.log

 

 

 

OPatchauto session completed at Wed Feb 12 23:09:16 2025

Time taken to complete the session 0 minute, 51 seconds

 

Install the GI patches as root

Note: when applying the RU with opatchauto, there is no need to stop the cluster or the instances manually.

Patch 33587128

[root@cbdps01 RU]# /u01/app/12.2.0.1/grid/OPatch/opatchauto apply /u01/setup/RU/33583921/33587128 -oh /u01/app/12.2.0.1/grid/

 

OPatchauto session is initiated at Wed Feb 26 17:33:13 2025

 

System initialization log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchautodb/systemconfig2025-02-26_05-33-14PM.log.

 

Session log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/opatchauto2025-02-26_05-33-36PM.log

The id for this session is C3JM

 

Executing OPatch prereq operations to verify patch applicability on home /u01/app/12.2.0.1/grid

Patch applicability verified successfully on home /u01/app/12.2.0.1/grid

 

 

Executing patch validation checks on home /u01/app/12.2.0.1/grid

Patch validation checks successfully completed on home /u01/app/12.2.0.1/grid

 

 

Performing prepatch operations on CRS - bringing down CRS service on home /u01/app/12.2.0.1/grid

Prepatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_05-33-55PM.log

CRS service brought down successfully on home /u01/app/12.2.0.1/grid

 

 

Start applying binary patch on home /u01/app/12.2.0.1/grid

Binary patch applied successfully on home /u01/app/12.2.0.1/grid

 

 

Running rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

Successfully executed rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

 

 

Performing postpatch operations on CRS - starting CRS service on home /u01/app/12.2.0.1/grid

 

Postpatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_05-38-42PM.log

CRS service started successfully on home /u01/app/12.2.0.1/grid

 

OPatchAuto successful.

 

--------------------------------Summary--------------------------------

 

Patching is completed successfully. Please find the summary as follows:

 

Host:cbdps01

CRS Home:/u01/app/12.2.0.1/grid

Version:12.2.0.1.0

Summary:

 

==Following patches were SUCCESSFULLY applied:

 

Patch: /u01/setup/RU/33583921/33587128

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-26_17-34-58PM_1.log

 

 

 

OPatchauto session completed at Wed Feb 26 17:51:34 2025

Time taken to complete the session 18 minutes, 22 seconds

 

Patch 33678030

[root@cbdps01 RU]# /u01/app/12.2.0.1/grid/OPatch/opatchauto apply /u01/setup/RU/33583921/33678030 -oh /u01/app/12.2.0.1/grid/

 

OPatchauto session is initiated at Wed Feb 26 17:53:05 2025

 

System initialization log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchautodb/systemconfig2025-02-26_05-53-08PM.log.

 

Session log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/opatchauto2025-02-26_05-53-32PM.log

The id for this session is 17IM

 

Executing OPatch prereq operations to verify patch applicability on home /u01/app/12.2.0.1/grid

Patch applicability verified successfully on home /u01/app/12.2.0.1/grid

 

 

Executing patch validation checks on home /u01/app/12.2.0.1/grid

Patch validation checks successfully completed on home /u01/app/12.2.0.1/grid

 

 

Performing prepatch operations on CRS - bringing down CRS service on home /u01/app/12.2.0.1/grid

Prepatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_05-53-50PM.log

CRS service brought down successfully on home /u01/app/12.2.0.1/grid

 

 

Start applying binary patch on home /u01/app/12.2.0.1/grid

Binary patch applied successfully on home /u01/app/12.2.0.1/grid

 

 

Running rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

Successfully executed rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

 

 

Performing postpatch operations on CRS - starting CRS service on home /u01/app/12.2.0.1/grid

Postpatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_05-58-00PM.log

CRS service started successfully on home /u01/app/12.2.0.1/grid

 

OPatchAuto successful.

 

--------------------------------Summary--------------------------------

 

Patching is completed successfully. Please find the summary as follows:

 

Host:cbdps01

CRS Home:/u01/app/12.2.0.1/grid

Version:12.2.0.1.0

Summary:

 

==Following patches were SUCCESSFULLY applied:

 

Patch: /u01/setup/RU/33583921/33678030

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-26_17-56-18PM_1.log

 

 

 

OPatchauto session completed at Wed Feb 26 18:03:55 2025

Time taken to complete the session 10 minutes, 50 seconds

 

Patch 33610989

[root@cbdps01 ~]# /u01/app/12.2.0.1/grid/bin/cluutil -ckpt -oraclebase /u01/app/grid -writeckpt -name ROOTCRS_PREPATCH -state START

[root@cbdps01 ~]# /u01/app/12.2.0.1/grid/crs/install/rootcrs.sh -prepatch

 

Note:

A. The two commands above must be run before applying patch 33610989; otherwise the error below occurs. Skipping them also triggers the same error when patch 26839277 is applied later.

cannot move 'GI_HOME/lib/libasmclntsh12.so' to 'GI_HOME/lib/libasmclntsh12.so.bak'

 

B. On node 2, replace the two commands above with the following two when applying this patch; otherwise the same issue appears.

[root@cbdps02 ~]# /u01/app/12.2.0.1/grid/bin/cluutil -ckpt -oraclebase /u01/app/grid -chkckpt -name ROOTCRS_PREPATCH -status

SUCCESS

[root@cbdps02 ~]# /u01/app/12.2.0.1/grid/bin/cluutil -ckpt -oraclebase /u01/app/grid -writeckpt -name ROOTCRS_PREPATCH -state START

 

This issue is resolved by following Doc ID 27554103.8.

 

[root@cbdps01 RU]# /u01/app/12.2.0.1/grid/OPatch/opatchauto apply /u01/setup/RU/33583921/33610989 -oh /u01/app/12.2.0.1/grid/

 

OPatchauto session is initiated at Wed Feb 26 23:44:45 2025

 

System initialization log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchautodb/systemconfig2025-02-26_11-44-49PM.log.

 

Session log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/opatchauto2025-02-26_11-44-54PM.log

The id for this session is X3VR

 

Executing OPatch prereq operations to verify patch applicability on home /u01/app/12.2.0.1/grid

Patch applicability verified successfully on home /u01/app/12.2.0.1/grid

 

 

Executing patch validation checks on home /u01/app/12.2.0.1/grid

Patch validation checks successfully completed on home /u01/app/12.2.0.1/grid

 

 

Performing prepatch operations on CRS - bringing down CRS service on home /u01/app/12.2.0.1/grid

Prepatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_11-45-05PM.log

CRS service brought down successfully on home /u01/app/12.2.0.1/grid

 

 

Start applying binary patch on home /u01/app/12.2.0.1/grid

Binary patch applied successfully on home /u01/app/12.2.0.1/grid

 

 

Running rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

Successfully executed rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

 

 

Performing postpatch operations on CRS - starting CRS service on home /u01/app/12.2.0.1/grid

Postpatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_11-45-34PM.log

CRS service started successfully on home /u01/app/12.2.0.1/grid

 

OPatchAuto successful.

 

--------------------------------Summary--------------------------------

 

Patching is completed successfully. Please find the summary as follows:

 

Host:cbdps01

CRS Home:/u01/app/12.2.0.1/grid

Version:12.2.0.1.0

Summary:

 

==Following patches were SUCCESSFULLY applied:

 

Patch: /u01/setup/RU/33583921/33610989

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-26_23-45-09PM_1.log

 

 

 

OPatchauto session completed at Wed Feb 26 23:51:26 2025

Time taken to complete the session 6 minutes, 42 seconds

 

Patch 26839277

[root@cbdps01 RU]# /u01/app/12.2.0.1/grid/OPatch/opatchauto apply /u01/setup/RU/33583921/26839277 -oh /u01/app/12.2.0.1/grid/

 

OPatchauto session is initiated at Wed Feb 26 23:52:51 2025

 

System initialization log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchautodb/systemconfig2025-02-26_11-52-56PM.log.

 

Session log file is /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/opatchauto2025-02-26_11-53-02PM.log

The id for this session is ZVC8

 

Executing OPatch prereq operations to verify patch applicability on home /u01/app/12.2.0.1/grid

Patch applicability verified successfully on home /u01/app/12.2.0.1/grid

 

 

Executing patch validation checks on home /u01/app/12.2.0.1/grid

Patch validation checks successfully completed on home /u01/app/12.2.0.1/grid

 

 

Performing prepatch operations on CRS - bringing down CRS service on home /u01/app/12.2.0.1/grid

Prepatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_11-53-13PM.log

CRS service brought down successfully on home /u01/app/12.2.0.1/grid

 

 

Start applying binary patch on home /u01/app/12.2.0.1/grid

Binary patch applied successfully on home /u01/app/12.2.0.1/grid

 

 

Running rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

Successfully executed rootadd_rdbms.sh on home /u01/app/12.2.0.1/grid

 

 

Performing postpatch operations on CRS - starting CRS service on home /u01/app/12.2.0.1/grid

Postpatch operation log file location: /u01/app/grid/crsdata/cbdps01/crsconfig/crspatch_cbdps01_2025-02-26_11-56-12PM.log

CRS service started successfully on home /u01/app/12.2.0.1/grid

 

OPatchAuto successful.

 

--------------------------------Summary--------------------------------

 

Patching is completed successfully. Please find the summary as follows:

 

Host:cbdps01

CRS Home:/u01/app/12.2.0.1/grid

Version:12.2.0.1.0

Summary:

 

==Following patches were SUCCESSFULLY applied:

 

Patch: /u01/setup/RU/33583921/26839277

Log: /u01/app/12.2.0.1/grid/cfgtoollogs/opatchauto/core/opatch/opatch2025-02-26_23-55-58PM_1.log

 

 

 

OPatchauto session completed at Thu Feb 27 00:01:37 2025

Time taken to complete the session 8 minutes, 46 seconds

 

Verify the patches

[grid@cbdps01 ~]$ opatch lspatches

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33610989;TOMCAT RELEASE UPDATE 12.2.0.1.0(ID:RELEASE) (33610989)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

26839277;DBWLM RELEASE UPDATE 12.2.0.1.0(ID:170913) (26839277)

33116894;ACFS JUL 2021 RELEASE UPDATE 12.2.0.1.210720 (33116894)

 

OPatch succeeded.

 

17.2.2.2、Applying the Oracle (DB) RU

Install the DB patches as root (the cluster does not need to be stopped; if an oracle instance is installed, stop the instance first).

[root@cbdps01 ~]# chown -R oracle:oinstall /u01/setup/RU/33583921

17.2.2.2.1、OPatch conflict check
[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /u01/setup/RU/33583921/33587128

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /u01/setup/RU/33583921/33678030

 

17.2.2.2.2、OPatch system space check

[oracle@cbdps01 ~]$ vi /tmp/patch_list_dbhome.txt

/u01/setup/RU/33583921/33587128

/u01/setup/RU/33583921/33678030

 

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckSystemSpace -phBaseFile /tmp/patch_list_dbhome.txt

 

17.2.2.2.3、Applying the DB RU

[root@cbdps01 ~]# /u01/app/oracle/product/12.2.0.1/db_1/OPatch/opatchauto apply /u01/setup/RU/33583921/33587128 -oh /u01/app/oracle/product/12.2.0.1/db_1/

[root@cbdps01 ~]# /u01/app/oracle/product/12.2.0.1/db_1/OPatch/opatchauto apply /u01/setup/RU/33583921/33678030 -oh /u01/app/oracle/product/12.2.0.1/db_1/

 

Note: the two applies took roughly 5 minutes and 1 minute respectively; adjust the paths to your environment.

 

[oracle@cbdps01 ~]$ opatch lspatches

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue; make sure the cluster on node 1 is up and running first.

 

17.3、Installing the OJVM RU

[root@cbdps01 ~]# chown -R oracle:oinstall /u01/setup/OJVM

[root@cbdps01 ~]# su - oracle

[oracle@cbdps01 ~]$ opatch lsinventory

[oracle@cbdps01 ~]$ cd /u01/setup/OJVM

[oracle@cbdps01 OJVM]$ unzip -q p33561275_122010_Linux-x86-64.zip

[oracle@cbdps01 OJVM]$ cd 33561275

[oracle@cbdps01 33561275]$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

[oracle@cbdps01 33561275]$ $ORACLE_HOME/OPatch/opatch apply -silent

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_16-23-50PM_1.log

 

Verifying environment and performing prerequisite checks...

OPatch continues with these patches:   33561275 

 

Do you want to proceed? [y|n]

Y (auto-answered by -silent)

User Responded with: Y

All checks passed.

 

Please shutdown Oracle instances running out of this ORACLE_HOME on the local system.

(Oracle Home = '/u01/app/oracle/product/12.2.0.1/db_1')

 

 

Is the local system ready for patching? [y|n]

Y (auto-answered by -silent)

User Responded with: Y

 

Backing up files...

Applying interim patch '33561275' to OH '/u01/app/oracle/product/12.2.0.1/db_1'

 

Patching component oracle.javavm.server, 12.2.0.1.0...

 

Patching component oracle.javavm.server.core, 12.2.0.1.0...

 

Patching component oracle.rdbms.dbscripts, 12.2.0.1.0...

 

Patching component oracle.javavm.client, 12.2.0.1.0...

 

Patching component oracle.rdbms, 12.2.0.1.0...

 

Patching component oracle.dbjava.jdbc, 12.2.0.1.0...

 

Patching component oracle.dbjava.ic, 12.2.0.1.0...

Patch 33561275 successfully applied.

Log file location: /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_16-23-50PM_1.log

 

OPatch succeeded.

[oracle@cbdps01 33561275]$ opatch lsinventory

[oracle@cbdps01 33561275]$ opatch lspatches

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

17.4、Installing DB one-off patches

17.4.1、Patch 24921392

Note: For a RAC environment, shut down all the services (database, ASM, listeners, nodeapps, and CRS daemons) running from the Oracle home of the node you want to patch. After you patch this node, start the services on this node. Repeat this process for each of the other nodes of the Oracle RAC system. OPatch is used on only one node at a time.

[root@cbdps01 setup]# crsctl stop crs

[root@cbdps01 setup]# chown -R oracle:oinstall /u01/setup/oneoff_patch/

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p24921392_12201230117DBJAN2023RU_Linux-x86-64.zip

$ cd 24921392

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_17-50-57PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

The details are:

Interim patch 24921392  requires prerequisite patch(es) [34850184] which are not present in the Oracle Home.

Apply prerequisite patch(es) [34850184] before applying interim patch 24921392.

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Conflicts/Supersets for each patch are:

 

Patch : 24921392

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libgeneric12.a:sskgm.o

Note: the pre-apply check for patch 24921392 found a problem, so its installation is put on hold.

 

17.4.2、Patch 26878028

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p26878028_12201240416DBAPR2024RU_Linux-x86-64.zip

$ cd 26878028

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_17-59-23PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

The details are:

Interim patch 26878028  requires prerequisite patch(es) [36325581] which are not present in the Oracle Home.

Apply prerequisite patch(es) [36325581] before applying interim patch 26878028.

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Conflicts/Supersets for each patch are:

 

Patch : 26878028

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libgeneric12.a:kgl.o

 

OPatch succeeded.

Note: the pre-apply check for patch 26878028 found a conflict, so its installation is put on hold.

 

17.4.3、Patch 27873364

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p27873364_121020_Linux-x86-64.zip

$ cd 27873364

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_18-09-28PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

ZOP-40: The patch(es) has conflicts with other patches installed in the Oracle Home (or) among themselves.

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Following patches have conflicts. Please contact Oracle Support and get the merged patch of the patches :

27873364, 33587128

 

Conflicts/Supersets for each patch are:

 

Patch : 27873364

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libserver12.a:ktli.o

 

OPatch succeeded.

Note: the pre-apply check for patch 27873364 found a conflict, so its installation is put on hold.

 

17.4.4、Patch 27882764

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p27882764_122010_Linux-x86-64.zip

$ cd 27882764

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_18-16-27PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

ZOP-40: The patch(es) has conflicts with other patches installed in the Oracle Home (or) among themselves.

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Following patches have conflicts. Please contact Oracle Support and get the merged patch of the patches :

27882764, 33587128

 

Conflicts/Supersets for each patch are:

 

Patch : 27882764

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libserver12.a:ksu.o

 

OPatch succeeded.

Note: the pre-apply check for patch 27882764 found a conflict, so its installation is put on hold.

 

17.4.5、Patch 30666479

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip -q p30666479_12201220118DBJAN2022RU_Linux-x86-64.zip

$ cd 30666479

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

$ opatch apply

$ opatch lsinventory

[oracle@cbdps01 30666479]$ opatch lspatches

30666479;

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: a production core database carries the patch inventory below; patches 27882764, 27873364, 26878028, and 24921392 were most likely applied there before 33587128, which is why no conflicts were detected on that system.

[oracle@rhcrmdbadm01 rhcrmdbadm01]$ opatch lspatches

30666479;

27882764;

27873364;

26878028;

24921392;

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

30118419;OCW Interim patch for 30118419

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

17.4.6、Starting the cluster on node 1

[root@cbdps01 ~]# crsctl start crs
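Before moving on, confirm the stack has come back up completely; the High Availability Services, CRS, CSS, and EVM daemons should all report online:

[root@cbdps01 ~]# crsctl check crs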

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

18、Creating the database with DBCA

Note: run on node 1 only.

Open Xmanager and create the database through the dbca GUI, selecting ZHS16GBK as the database character set.

[oracle@cbdps01 ~]$ export DISPLAY=192.168.133.1:0.0

[oracle@cbdps01 ~]$ dbca

Note: if any other check item fails here, it must not be ignored.
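For a scripted, repeatable build, dbca can also run in silent mode; a minimal sketch (template name, password placeholders, and options below are illustrative assumptions, not what the GUI run used):

[oracle@cbdps01 ~]$ dbca -silent -createDatabase \
      -templateName General_Purpose.dbc \
      -gdbName cbdps -characterSet ZHS16GBK \
      -sysPassword <password> -systemPassword <password> \
      -storageType ASM -diskGroupName DATA \
      -databaseConfigType RAC -nodelist cbdps01,cbdps02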

Note: at this point the Oracle 12.2.0.1 RAC database has been created successfully.

 

[grid@cbdps01 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.DATA.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER_SCAN2.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.LISTENER_SCAN3.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.221.11 192.1

                                                             68.78.170,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps.db

      1        ONLINE  ONLINE       cbdps01                  Open,HOME=/u01/app/o

                                                             racle/product/12.2.0

                                                             .1/db_1,STABLE

      2        ONLINE  ONLINE       cbdps02                  Open,HOME=/u01/app/o

                                                             racle/product/12.2.0

                                                             .1/db_1,STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.scan2.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan3.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

 

Note: all resources are healthy; the installation of Oracle 12.2.0.1 GI & RAC on RHEL 7.9 is complete.

19、Related queries

[oracle@cbdps01 ~]$ sqlplus / as sysdba

 

SQL*Plus: Release 12.2.0.1.0 Production on Thu Feb 13 09:08:03 2025

 

Copyright (c) 1982, 2016, Oracle.  All rights reserved.

 

 

Connected to:

Oracle Database 12c Enterprise Edition Release 12.2.0.1.0 - 64bit Production

SQL> set line 200

SQL> col comp_id for a20

SQL> col status for a15

SQL> select comp_id,status,version from dba_registry;

 

COMP_ID              STATUS          VERSION

-------------------- --------------- ------------------------------

CATALOG              VALID           12.2.0.1.0

CATPROC              VALID           12.2.0.1.0

JAVAVM               VALID           12.2.0.1.0

XML                  VALID           12.2.0.1.0

CATJAVA              VALID           12.2.0.1.0

APS                  VALID           12.2.0.1.0

RAC                  VALID           12.2.0.1.0

XDB                  VALID           12.2.0.1.0

OWM                  VALID           12.2.0.1.0

CONTEXT              VALID           12.2.0.1.0

ORDIM                VALID           12.2.0.1.0

 

COMP_ID              STATUS          VERSION

-------------------- --------------- ------------------------------

SDO                  VALID           12.2.0.1.0

XOQ                  VALID           12.2.0.1.0

OLS                  VALID           12.2.0.1.0

DV                   VALID           12.2.0.1.0

 

15 rows selected.

 

SQL> select BANNER from v$version;

 

BANNER

------------------------------------------------------------------------------

Oracle Database 12c Enterprise Edition Release 12.2.0.1.0 - 64bit Production

PL/SQL Release 12.2.0.1.0 - Production

CORE    12.2.0.1.0      Production

TNS for Linux: Version 12.2.0.1.0 - Production

NLSRTL Version 12.2.0.1.0 - Production

 

SQL> select * from v$logfile;

 

    GROUP# STATUS          TYPE    MEMBER                                             IS_     CON_ID

---------- --------------- ------- -------------------------------------------------- --- ----------

         2                 ONLINE  +DATA/CBDPS/ONLINELOG/group_2.263.1192957083       NO           0

         1                 ONLINE  +DATA/CBDPS/ONLINELOG/group_1.262.1192957083       NO           0

         3                 ONLINE  +DATA/CBDPS/ONLINELOG/group_3.266.1192957439       NO           0

         4                 ONLINE  +DATA/CBDPS/ONLINELOG/group_4.267.1192957439       NO           0

 

SQL> col name for a70

SQL> select name from v$controlfile;

 

NAME

----------------------------------------------------------------------

+DATA/CBDPS/CONTROLFILE/current.261.1192957081

 

SQL> col file_name for a45

SQL> select tablespace_name,file_name,file_id from dba_data_files order by 3;

 

TABLESPACE_NAME                FILE_NAME                                        FILE_ID

------------------------------ --------------------------------------------- ----------

SYSTEM                         +DATA/CBDPS/DATAFILE/system.257.1192956917             1

SYSAUX                         +DATA/CBDPS/DATAFILE/sysaux.258.1192956997             3

UNDOTBS1                       +DATA/CBDPS/DATAFILE/undotbs1.259.1192957023           4

UNDOTBS2                       +DATA/CBDPS/DATAFILE/undotbs2.265.1192957115           5

USERS                          +DATA/CBDPS/DATAFILE/users.260.1192957023              7
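Besides opatch lspatches at the binary level, the SQL-level patch state inside the database can be checked through dba_registry_sqlpatch (populated when datapatch runs, e.g. during database creation); a sketch, output omitted:

SQL> col description for a60
SQL> select patch_id,status,description from dba_registry_sqlpatch;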

 

References:

https://www.cnblogs.com/aegis1019/p/8866756.html

https://cloud.tencent.com/developer/article/1431555

https://cloud.tencent.com/developer/article/1431536

https://cloud.tencent.com/developer/article/1431538

https://cloud.tencent.com/developer/user/1955618/search/article-Linux%E5%B9%B3%E5%8F%B0%20Oracle%2012cR2%20RAC

https://www.cnblogs.com/binliubiao/p/12608341.html (silent installation)
