
Building Oracle 12.2.0.1 RAC on RHEL 7.9

Leo 2025-03-16

Topic: building Oracle 12.2.0.1 RAC on RHEL 7.9.

1、Cluster Planning

Storage space planning.

Software package information.

2、Network Configuration

Add three additional NICs to each of the two hosts, giving each node four NICs in total, configured as follows.

2.1、Node 1 NIC Configuration

The four NICs on node 1 are configured as follows.

[root@cbdps01 network-scripts]# cat ifcfg-ens33

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens33

UUID=f7a128ad-7a99-44a9-a4ba-10ef1cdd77af

DEVICE=ens33

ONBOOT=yes

IPADDR=192.168.133.50

HWADDR=00:50:56:32:5d:08

PREFIX=24

GATEWAY=192.168.133.2

DNS1=192.168.133.2

DOMAIN=192.168.133.2

IPV6_PRIVACY=no

 

[root@cbdps01 network-scripts]# cat ifcfg-ens37

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens37

UUID=ad2b31c1-b4c5-40a9-befa-631c279f7dba

DEVICE=ens37

ONBOOT=yes

IPADDR=192.168.133.51

HWADDR=00:50:56:37:5e:43

PREFIX=24

GATEWAY=192.168.133.2

DNS1=192.168.133.2

DOMAIN=192.168.133.2

IPV6_PRIVACY=no

 

[root@cbdps01 network-scripts]# cat ifcfg-ens38

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens38

UUID=0990bce6-d096-4888-a5b7-0e6546757bc6

DEVICE=ens38

ONBOOT=yes

IPADDR=192.168.78.50

HWADDR=00:50:56:3d:cd:7f

PREFIX=24

IPV6_PRIVACY=no

 

[root@cbdps01 network-scripts]# cat ifcfg-ens39

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens39

UUID=e7d5ee79-a6d7-40d5-a1f0-c2bd0fb7f035

DEVICE=ens39

ONBOOT=yes

IPADDR=192.168.78.51

HWADDR=00:50:56:28:0a:d0

PREFIX=24

IPV6_PRIVACY=no

 

[root@cbdps01 network-scripts]# nmcli con show

NAME    UUID                                  TYPE      DEVICE

ens33   f7a128ad-7a99-44a9-a4ba-10ef1cdd77af  ethernet  ens33 

ens37   ad2b31c1-b4c5-40a9-befa-631c279f7dba  ethernet  ens37 

ens38   0990bce6-d096-4888-a5b7-0e6546757bc6  ethernet  ens38 

ens39   e7d5ee79-a6d7-40d5-a1f0-c2bd0fb7f035  ethernet  ens39 

virbr0  f1ccf559-74a1-4882-9697-6e4692f3652e  bridge    virbr0

 

[root@cbdps01 network-scripts]# ifconfig -a

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.50  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::867f:3b7c:6cb0:b1ea  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:32:5d:08  txqueuelen 1000  (Ethernet)

        RX packets 2046  bytes 2294811 (2.1 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 725  bytes 96612 (94.3 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.51  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::4942:9e6e:a5f0:a3d0  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:37:5e:43  txqueuelen 1000  (Ethernet)

        RX packets 25  bytes 3375 (3.2 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 42  bytes 5119 (4.9 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens38: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.50  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::da18:59d2:3ace:6675  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:3d:cd:7f  txqueuelen 1000  (Ethernet)

        RX packets 21  bytes 3551 (3.4 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 41  bytes 5049 (4.9 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens39: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.51  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::cd65:ab87:f3f:9c59  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:28:0a:d0  txqueuelen 1000  (Ethernet)

        RX packets 18  bytes 2950 (2.8 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 40  bytes 4963 (4.8 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 204  bytes 17340 (16.9 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 204  bytes 17340 (16.9 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500

        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255

        ether 52:54:00:74:03:ed  txqueuelen 1000  (Ethernet)

        RX packets 0  bytes 0 (0.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 0  bytes 0 (0.0 B)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

virbr0-nic: flags=4098<BROADCAST,MULTICAST>  mtu 1500

        ether 52:54:00:74:03:ed  txqueuelen 1000  (Ethernet)

        RX packets 0  bytes 0 (0.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 0  bytes 0 (0.0 B)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

Change the hostname

[root@template ~]# hostnamectl set-hostname cbdps01

[root@template ~]# exec bash

[root@cbdps01 ~]# cat /etc/sysconfig/network

# Created by anaconda

[root@cbdps01 ~]# vi /etc/sysconfig/network

[root@cbdps01 ~]# cat /etc/sysconfig/network

# Created by anaconda

NETWORKING=yes

HOSTNAME=cbdps01

 

2.2、Node 2 NIC Configuration

The four NICs on node 2 are configured as follows.

[root@cbdps02 network-scripts]# cat ifcfg-ens33

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens33

UUID=1ec5d0a4-1339-4730-ae5e-332a7445cce3

DEVICE=ens33

ONBOOT=yes

IPADDR=192.168.133.52

HWADDR=00:50:56:32:c7:51

PREFIX=24

GATEWAY=192.168.133.2

DNS1=192.168.133.2

DOMAIN=192.168.133.2

IPV6_PRIVACY=no

 

[root@cbdps02 network-scripts]# cat ifcfg-ens37

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens37

UUID=b276c4f1-a77c-497f-afa7-332bb74b0738

DEVICE=ens37

ONBOOT=yes

IPADDR=192.168.133.53

HWADDR=00:50:56:3c:5e:b9

PREFIX=24

GATEWAY=192.168.133.2

DNS1=192.168.133.2

DOMAIN=192.168.133.2

IPV6_PRIVACY=no

 

[root@cbdps02 network-scripts]# cat ifcfg-ens38

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens38

UUID=7111970e-1fff-400f-85c2-bd731e7b96c4

DEVICE=ens38

ONBOOT=yes

IPADDR=192.168.78.52

HWADDR=00:50:56:29:46:0c

PREFIX=24

IPV6_PRIVACY=no

 

[root@cbdps02 network-scripts]# cat ifcfg-ens39

TYPE=Ethernet

PROXY_METHOD=none

BROWSER_ONLY=no

BOOTPROTO=none

DEFROUTE=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_FAILURE_FATAL=no

IPV6_ADDR_GEN_MODE=stable-privacy

NAME=ens39

UUID=a5b0514d-831d-4ae1-b4cb-0d2eba6549f7

DEVICE=ens39

ONBOOT=yes

IPADDR=192.168.78.53

HWADDR=00:50:56:2c:31:5f

PREFIX=24

IPV6_PRIVACY=no

 

[root@cbdps02 network-scripts]# nmcli con show

NAME    UUID                                  TYPE      DEVICE

ens33   1ec5d0a4-1339-4730-ae5e-332a7445cce3  ethernet  ens33 

ens37   b276c4f1-a77c-497f-afa7-332bb74b0738  ethernet  ens37 

ens38   7111970e-1fff-400f-85c2-bd731e7b96c4  ethernet  ens38 

ens39   a5b0514d-831d-4ae1-b4cb-0d2eba6549f7  ethernet  ens39 

virbr0  47b4e531-445b-40f8-bd1f-3798a4b14461  bridge    virbr0

 

[root@cbdps02 network-scripts]# ifconfig

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.52  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::e4f2:f6ca:5a8:f838  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:32:c7:51  txqueuelen 1000  (Ethernet)

        RX packets 4205  bytes 4385925 (4.1 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 1535  bytes 239345 (233.7 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.133.53  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::9c26:1d5c:abc9:6a11  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:3c:5e:b9  txqueuelen 1000  (Ethernet)

        RX packets 47  bytes 5354 (5.2 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 41  bytes 5102 (4.9 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens38: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.52  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::6d2d:d09:ef67:16f4  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:29:46:0c  txqueuelen 1000  (Ethernet)

        RX packets 19  bytes 3475 (3.3 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 38  bytes 4887 (4.7 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens39: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.78.53  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::bb8b:a422:a97:e4cd  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:2c:31:5f  txqueuelen 1000  (Ethernet)

        RX packets 18  bytes 3394 (3.3 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 38  bytes 4887 (4.7 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 228  bytes 19380 (18.9 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 228  bytes 19380 (18.9 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500

        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255

        ether 52:54:00:74:03:ed  txqueuelen 1000  (Ethernet)

        RX packets 0  bytes 0 (0.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 0  bytes 0 (0.0 B)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

Change the hostname

[root@template ~]# hostnamectl set-hostname cbdps02

[root@template ~]# exec bash

[root@cbdps02 ~]# cat /etc/sysconfig/network

# Created by anaconda

[root@cbdps02 ~]# vi /etc/sysconfig/network

[root@cbdps02 ~]# cat /etc/sysconfig/network

# Created by anaconda

NETWORKING=yes

HOSTNAME=cbdps02

 

2.3、Node 1 NIC Bonding

Bonding is used here to pair the NICs.

[root@cbdps01 network-scripts]# vi ifcfg-ens33

NAME=ens33

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens33

ONBOOT=yes

MASTER=bond0

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps01 network-scripts]# vi ifcfg-ens37

NAME=ens37

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens37

ONBOOT=yes

MASTER=bond0

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps01 network-scripts]# vi ifcfg-ens38

NAME=ens38

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens38

ONBOOT=yes

MASTER=bond1

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps01 network-scripts]# vi ifcfg-ens39

NAME=ens39

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens39

ONBOOT=yes

MASTER=bond1

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps01 network-scripts]# vi ifcfg-bond0

DEVICE=bond0

TYPE=Bond

NAME=bond0

BONDING_MASTER=yes

BOOTPROTO=none

ONBOOT=yes

NM_CONTROLLED=no

BONDING_OPTS="mode=1 miimon=100"

IPADDR=192.168.133.245

NETMASK=255.255.255.0

GATEWAY=192.168.133.2

 

[root@cbdps01 network-scripts]# vi ifcfg-bond1

DEVICE=bond1

TYPE=Bond

NAME=bond1

BONDING_MASTER=yes

BOOTPROTO=none

ONBOOT=yes

NM_CONTROLLED=no

BONDING_OPTS="mode=1 miimon=100"

IPADDR=192.168.78.245

NETMASK=255.255.255.0

 

Notes:

mode=1 enables the active-backup bonding mode;

miimon=100 sets the link-monitoring interval to 100 ms.
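
To confirm the active-backup behavior, the bonding driver's live status can be read from /proc/net/bonding; a quick check against the bond0/bond1 devices defined above (the grep pattern only narrows the output):

[root@cbdps01 ~]# grep -E "Bonding Mode|Currently Active Slave|MII Status" /proc/net/bonding/bond0

[root@cbdps01 ~]# grep -E "Bonding Mode|Currently Active Slave|MII Status" /proc/net/bonding/bond1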

 

[root@cbdps01 network-scripts]# systemctl restart network

 

[root@cbdps01 ~]# ifconfig

bond0: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500

        inet 192.168.133.245  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::250:56ff:fe32:5d08  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:32:5d:08  txqueuelen 1000  (Ethernet)

        RX packets 95  bytes 10322 (10.0 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 72  bytes 12649 (12.3 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

bond1: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500

        inet 192.168.78.245  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::250:56ff:fe3d:cd7f  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:3d:cd:7f  txqueuelen 1000  (Ethernet)

        RX packets 5  bytes 300 (300.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 31  bytes 4312 (4.2 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens33: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:32:5d:08  txqueuelen 1000  (Ethernet)

        RX packets 2619  bytes 2363478 (2.2 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 1033  bytes 212906 (207.9 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:32:5d:08  txqueuelen 1000  (Ethernet)

        RX packets 232  bytes 26444 (25.8 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 47  bytes 5512 (5.3 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens38: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:3d:cd:7f  txqueuelen 1000  (Ethernet)

        RX packets 80  bytes 12678 (12.3 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 77  bytes 9754 (9.5 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens39: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:3d:cd:7f  txqueuelen 1000  (Ethernet)

        RX packets 82  bytes 12377 (12.0 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 45  bytes 5356 (5.2 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 256  bytes 21764 (21.2 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 256  bytes 21764 (21.2 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500

        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255

        ether 52:54:00:74:03:ed  txqueuelen 1000  (Ethernet)

        RX packets 0  bytes 0 (0.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 0  bytes 0 (0.0 B)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

[root@cbdps01 ~]# nmcli con show

NAME    UUID                                  TYPE    DEVICE

virbr0  f1ccf559-74a1-4882-9697-6e4692f3652e  bridge  virbr0        

 

Notes:

1) Even after a system reboot, nmcli con show lists only the single record above.

2) For reference, the following is the NIC configuration from a production RAC build on RHEL 6.5.

[root@localhost network-scripts]# cat ifcfg-eth14

NAME=eth14

TYPE=Ethernet

BOOTPROTO=none

DEVICE=eth14

ONBOOT=yes

MASTER=bond0

SLAVE=yes

NM_CONTROLLED=no

 

2.4、Node 2 NIC Bonding

Bonding is used here as well.

[root@cbdps02 network-scripts]# vi ifcfg-ens33

NAME=ens33

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens33

ONBOOT=yes

MASTER=bond0

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps02 network-scripts]# vi ifcfg-ens37

NAME=ens37

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens37

ONBOOT=yes

MASTER=bond0

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps02 network-scripts]# vi ifcfg-ens38

NAME=ens38

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens38

ONBOOT=yes

MASTER=bond1

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps02 network-scripts]# vi ifcfg-ens39

NAME=ens39

TYPE=Ethernet

BOOTPROTO=none

DEVICE=ens39

ONBOOT=yes

MASTER=bond1

SLAVE=yes

NM_CONTROLLED=no

 

[root@cbdps02 network-scripts]# vi ifcfg-bond0

DEVICE=bond0

TYPE=Bond

NAME=bond0

BONDING_MASTER=yes

BOOTPROTO=none

ONBOOT=yes

NM_CONTROLLED=no

BONDING_OPTS="mode=1 miimon=100"

IPADDR=192.168.133.175

NETMASK=255.255.255.0

GATEWAY=192.168.133.2

 

[root@cbdps02 network-scripts]# vi ifcfg-bond1

DEVICE=bond1

TYPE=Bond

NAME=bond1

BONDING_MASTER=yes

BOOTPROTO=none

ONBOOT=yes

NM_CONTROLLED=no

BONDING_OPTS="mode=1 miimon=100"

IPADDR=192.168.78.175

NETMASK=255.255.255.0

 

[root@cbdps02 network-scripts]# systemctl restart network

 

[root@cbdps02 ~]# ifconfig

bond0: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500

        inet 192.168.133.175  netmask 255.255.255.0  broadcast 192.168.133.255

        inet6 fe80::250:56ff:fe32:c751  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:32:c7:51  txqueuelen 1000  (Ethernet)

        RX packets 99  bytes 13486 (13.1 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 82  bytes 14366 (14.0 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

bond1: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500

        inet 192.168.78.175  netmask 255.255.255.0  broadcast 192.168.78.255

        inet6 fe80::250:56ff:fe29:460c  prefixlen 64  scopeid 0x20<link>

        ether 00:50:56:29:46:0c  txqueuelen 1000  (Ethernet)

        RX packets 6  bytes 360 (360.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 31  bytes 4291 (4.1 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens33: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:32:c7:51  txqueuelen 1000  (Ethernet)

        RX packets 4871  bytes 4464311 (4.2 MiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 1917  bytes 367744 (359.1 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens37: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:32:c7:51  txqueuelen 1000  (Ethernet)

        RX packets 196  bytes 19706 (19.2 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 48  bytes 5657 (5.5 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens38: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:29:46:0c  txqueuelen 1000  (Ethernet)

        RX packets 79  bytes 12454 (12.1 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 76  bytes 9733 (9.5 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

ens39: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500

        ether 00:50:56:29:46:0c  txqueuelen 1000  (Ethernet)

        RX packets 84  bytes 12733 (12.4 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 45  bytes 5442 (5.3 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 1000  (Local Loopback)

        RX packets 280  bytes 23804 (23.2 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 280  bytes 23804 (23.2 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500

        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255

        ether 52:54:00:74:03:ed  txqueuelen 1000  (Ethernet)

        RX packets 0  bytes 0 (0.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 0  bytes 0 (0.0 B)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

[root@cbdps02 ~]# nmcli con show

NAME    UUID                                  TYPE    DEVICE

virbr0  47b4e531-445b-40f8-bd1f-3798a4b14461  bridge  virbr0

 

Note: even after a system reboot, nmcli con show lists only the single record above.

 

2.5、DNS Server Configuration

DNS server: IP 192.168.133.200, hostname leo-mydns.

2.5.1、Change the Hostname

[root@leo-mydns yum.repos.d]# vi /etc/sysconfig/network

[root@leo-mydns yum.repos.d]# cat /etc/sysconfig/network

# Created by anaconda

HOSTNAME=leo-mydns

 

2.5.2、Install the bind Packages

Mount the installation ISO

# mount /dev/sr0 /mnt

mount: /dev/sr0 is write-protected, mounting read-only

Configure the yum repository

[root@leo-mydns ~]# cd /etc/yum.repos.d/

[root@leo-mydns yum.repos.d]# mkdir repo.bak

[root@leo-mydns yum.repos.d]# mv *.repo repo.bak

[root@leo-mydns yum.repos.d]# cat <<EOF>>/etc/yum.repos.d/local.repo

[local]

name=local

baseurl=file:///mnt

gpgcheck=0

enabled=1

EOF

[root@leo-mydns yum.repos.d]# yum makecache

[root@leo-mydns yum.repos.d]# rpm -qa bind

[root@leo-mydns yum.repos.d]# yum install -y bind*

[root@leo-mydns yum.repos.d]# rpm -qa bind       

bind-9.11.4-26.P2.el7.x86_64

[root@leo-mydns yum.repos.d]# rpm -qa | grep bind

bind-libs-lite-9.11.4-26.P2.el7.x86_64

bind-chroot-9.11.4-26.P2.el7.x86_64

bind-9.11.4-26.P2.el7.x86_64

bind-export-libs-9.11.4-26.P2.el7.x86_64

keybinder3-0.3.0-1.el7.x86_64

bind-pkcs11-libs-9.11.4-26.P2.el7.x86_64

bind-pkcs11-9.11.4-26.P2.el7.x86_64

bind-libs-9.11.4-26.P2.el7.x86_64

bind-pkcs11-utils-9.11.4-26.P2.el7.x86_64

bind-dyndb-ldap-11.1-7.el7.x86_64

rpcbind-0.2.0-49.el7.x86_64

bind-utils-9.11.4-26.P2.el7.x86_64

bind-license-9.11.4-26.P2.el7.noarch

 

Notes:

bind-9.11.4: the main server program

bind-libs: shared libraries for the server

bind-utils: client test utilities such as nslookup, dig, host, rndc, etc.

 

2.5.3、Edit the Main Configuration File named.conf

[root@leo-mydns yum.repos.d]# vi /etc/named.conf

options {

        directory "/var/named";      // Base directory for named

        allow-transfer {"none";};    // Slave servers that may pull zone transfers; deny everyone by default

        };

 

zone "." IN {

        type hint;

        file "named.ca";

};

 

include "/etc/named.rfcl912.zones";

 

[root@leo-mydns yum.repos.d]# vi /etc/named.rfc1912.zones

# Reverse zone

zone "133.168.192.IN-ADDR.ARPA." IN { // Reverse zone.

        type master;

        notify no;

        file "192.168.133.db";

};

# Forward zone

zone "cqupt.com." IN {

        type master;

        notify no;

        file "cqupt.com.db";

};

 

说明:"cqupt.com"为域名.

 

2.5.4、Create the Forward Zone File

[root@leo-mydns ~]# vi /var/named/cqupt.com.db

$TTL 86400         ; Time to live

$ORIGIN  cqupt.com.

@              IN    SOA     leo-mydns  root.cqupt.com. (

                             2025030901    ; serial (todays date + todays serial #)

                             3H            ; refresh 3 hours

                             1H            ; retry 1 hour

                             1W            ; expire 1 week

                             1D )          ; minimum 24 hour

                           

@              IN            NS  leo-mydns

;             

               IN            A 192.168.133.200

leo-mydns      IN A          192.168.133.200

cbdps01        IN A          192.168.133.245

cbdps02        IN A          192.168.133.175

cbdps01-vip    IN A          192.168.133.246

cbdps02-vip    IN A          192.168.133.176

cbdps-scan     IN A          192.168.133.247

leo-openfiler  IN A          192.168.133.201

;

 

Notes:

$TTL 86400: sets the default time-to-live for every record in this zone to 24 hours; the TTL governs how long resolvers and clients may cache a record before refetching it.

$ORIGIN cqupt.com.: sets the default domain for the zone file; any unqualified name (such as leo-mydns) is automatically expanded with it, becoming "leo-mydns.cqupt.com".

2025030901: the zone serial number, conventionally today's date (9 March 2025) plus an incrementing sequence, used to signal zone updates.

 

2.5.5、Create the Reverse Zone File

[root@leo-mydns ~]# vi /var/named/192.168.133.db

$TTL 86400

@       IN      SOA     leo-mydns root.cqupt.com.  (

                        2025030901     ; serial (todays date + todays serial #)

                        3H             ; refresh 3 hours

                        1H             ; retry 1 hour

                        1W             ; expire 1 week

                        1D )           ; minimum 24 hour

;

                 NS      leo-mydns.cqupt.com.

200     IN      PTR     leo-mydns.cqupt.com.

245     IN      PTR     cbdps01.cqupt.com.

175     IN      PTR     cbdps02.cqupt.com.

246     IN      PTR     cbdps01-vip.cqupt.com.

176     IN      PTR     cbdps02-vip.cqupt.com.

247     IN      PTR     cbdps-scan.cqupt.com.

201     IN      PTR     leo-openfiler.cqupt.com.
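
Before restarting named, the configuration and both zone files can be syntax-checked with the named-checkconf and named-checkzone tools shipped in the bind package; a sanity check, with zone names matching the zone statements in named.rfc1912.zones above:

[root@leo-mydns ~]# named-checkconf /etc/named.conf

[root@leo-mydns ~]# named-checkzone cqupt.com /var/named/cqupt.com.db

[root@leo-mydns ~]# named-checkzone 133.168.192.IN-ADDR.ARPA /var/named/192.168.133.db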

 

2.5.6、Edit /etc/resolv.conf

[root@leo-mydns ~]# vi /etc/resolv.conf

[root@leo-mydns ~]# cat /etc/resolv.conf

# Generated by NetworkManager

options attempts:2

options timeout:1

search cqupt.com

nameserver 192.168.133.200

 

Notes:

nameserver is followed by the DNS server's IP address.

search is followed by the domain(s) to append when resolving short names.

 

2.5.7、Start the DNS Service

[root@leo-mydns ~]# systemctl restart named

[root@leo-mydns ~]# systemctl status named

● named.service - Berkeley Internet Name Domain (DNS)

   Loaded: loaded (/usr/lib/systemd/system/named.service; disabled; vendor preset: disabled)

   Active: active (running) since Sun 2025-03-09 15:37:57 CST; 7s ago

  Process: 6878 ExecStart=/usr/sbin/named -u named -c ${NAMEDCONF} $OPTIONS (code=exited, status=0/SUCCESS)

  Process: 6875 ExecStartPre=/bin/bash -c if [ ! "$DISABLE_ZONE_CHECKING" == "yes" ]; then /usr/sbin/named-checkconf -z "$NAMEDCONF"; else echo "Checking of zone files is disabled"; fi (code=exited, status=0/SUCCESS)

 Main PID: 6881 (named)

    Tasks: 5

   CGroup: /system.slice/named.service

           └─6881 /usr/sbin/named -u named -c /etc/named.conf

 

Mar 09 15:37:57 leo-mydns named[6881]: configuring command channel from '/etc/rndc.key'

Mar 09 15:37:57 leo-mydns named[6881]: command channel listening on 127.0.0.1#953

Mar 09 15:37:57 leo-mydns named[6881]: configuring command channel from '/etc/rndc.key'

Mar 09 15:37:57 leo-mydns named[6881]: command channel listening on ::1#953

Mar 09 15:37:57 leo-mydns named[6881]: managed-keys-zone: loaded serial 0

Mar 09 15:37:57 leo-mydns named[6881]: zone cqupt.com/IN: loaded serial 2025030901

Mar 09 15:37:57 leo-mydns named[6881]: zone 133.168.192.IN-ADDR.ARPA/IN: loaded serial 2025030901

Mar 09 15:37:57 leo-mydns named[6881]: all zones loaded

Mar 09 15:37:57 leo-mydns named[6881]: running

Mar 09 15:37:57 leo-mydns systemd[1]: Started Berkeley Internet Name Domain (DNS).

 

[root@leo-mydns ~]# rndc status

version: BIND 9.11.4-P2-RedHat-9.11.4-26.P2.el7 (Extended Support Version) <id:7107deb>

running on leo-mydns: Linux x86_64 3.10.0-1160.el7.x86_64 #1 SMP Tue Aug 18 14:50:17 EDT 2020

boot time: Sun, 09 Mar 2025 07:37:57 GMT

last configured: Sun, 09 Mar 2025 07:37:57 GMT

configuration file: /etc/named.conf

CPUs found: 2

worker threads: 2

UDP listeners per interface: 1

number of zones: 102 (99 automatic)

debug level: 0

xfers running: 0

xfers deferred: 0

soa queries in progress: 0

query logging is OFF

recursive clients: 0/900/1000

tcp clients: 2/150

server is up and running

 

2.5.8、Enable the DNS Service at Boot

[root@leo-mydns ~]# systemctl is-enabled named

disabled

[root@leo-mydns ~]# systemctl enable named

Created symlink from /etc/systemd/system/multi-user.target.wants/named.service to /usr/lib/systemd/system/named.service.

[root@leo-mydns ~]# systemctl is-enabled named

enabled

 

2.5.9、DNS Resolution Tests

-- Test forward resolution of cbdps01

[root@leo-mydns ~]# nslookup cbdps01

Server:         192.168.133.200

Address:        192.168.133.200#53

 

Name:   cbdps01.cqupt.com

Address: 192.168.133.245

 

-- Test forward resolution of cbdps02

[root@leo-mydns ~]# nslookup cbdps02

Server:         192.168.133.200

Address:        192.168.133.200#53

 

Name:   cbdps02.cqupt.com

Address: 192.168.133.175

 

-- Test forward resolution of cbdps-scan

[root@leo-mydns ~]# nslookup cbdps-scan

Server:         192.168.133.200

Address:        192.168.133.200#53

 

Name:   cbdps-scan.cqupt.com

Address: 192.168.133.247

 

-- Test reverse resolution of 192.168.133.245 (cbdps01)

[root@leo-mydns ~]# nslookup 192.168.133.245

245.133.168.192.IN-ADDR.ARPA    name = cbdps01.cqupt.com.

 

-- Test reverse resolution of 192.168.133.175 (cbdps02)

[root@leo-mydns ~]# nslookup 192.168.133.175

175.133.168.192.IN-ADDR.ARPA    name = cbdps02.cqupt.com.

 

-- Test reverse resolution of 192.168.133.247 (cbdps-scan)

[root@leo-mydns ~]# nslookup 192.168.133.247

247.133.168.192.IN-ADDR.ARPA    name = cbdps-scan.cqupt.com.

 

2.5.10、Disable the Firewall

[root@leo-mydns ~]# systemctl stop firewalld.service

[root@leo-mydns ~]# systemctl disable firewalld.service

Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.

Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

 

Note: the firewall is disabled so that clients can reach this DNS server for name resolution.

 

3、Installation Preparation

Note: unless stated otherwise, steps 3.1 through 3.18 must be performed on both nodes.

3.1、System Version

[root@cbdps01 ~]# cat /etc/*release

NAME="Red Hat Enterprise Linux Server"

VERSION="7.9 (Maipo)"

ID="rhel"

ID_LIKE="fedora"

VARIANT="Server"

VARIANT_ID="server"

VERSION_ID="7.9"

PRETTY_NAME="Red Hat Enterprise Linux Server 7.9 (Maipo)"

ANSI_COLOR="0;31"

CPE_NAME="cpe:/o:redhat:enterprise_linux:7.9:GA:server"

HOME_URL="https://www.redhat.com/"

BUG_REPORT_URL="https://bugzilla.redhat.com/"

 

REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"

REDHAT_BUGZILLA_PRODUCT_VERSION=7.9

REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"

REDHAT_SUPPORT_PRODUCT_VERSION="7.9"

Red Hat Enterprise Linux Server release 7.9 (Maipo)

Red Hat Enterprise Linux Server release 7.9 (Maipo)

 

Remove virbr0

[root@cbdps01 ~]# yum remove libvirt-libs

 

3.2、Install Dependency Packages

3.2.1、Mount the Installation ISO

# mount /dev/sr0 /mnt

mount: /dev/sr0 is write-protected, mounting read-only

3.2.2、Configure the yum Repository

[root@cbdps01 ~]# cd /etc/yum.repos.d/

[root@cbdps01 yum.repos.d]# mkdir repo.bak

[root@cbdps01 yum.repos.d]# mv *.repo repo.bak

[root@cbdps01 yum.repos.d]# cat <<EOF>>/etc/yum.repos.d/local.repo

[local]

name=local

baseurl=file:///mnt

gpgcheck=0

enabled=1

EOF

[root@cbdps01 yum.repos.d]# yum makecache

 

3.2.3、Install the Dependencies

# yum install -y bc \

binutils \

compat-libcap1 \

compat-libstdc++-33 \

gcc \

gcc-c++ \

elfutils-libelf \

elfutils-libelf-devel \

glibc \

glibc-devel \

ksh \

libaio \

libaio-devel \

libgcc \

libstdc++ \

libstdc++-devel \

libxcb \

libX11 \

libXau \

libXi \

libXtst \

libXrender \

libXrender-devel \

make \

net-tools \

nfs-utils \

smartmontools \

sysstat \

e2fsprogs \

e2fsprogs-libs \

fontconfig-devel \

expect \

unzip \

openssh-clients \

readline* \

tigervnc* \

psmisc \

iotop --skip-broken

 

Upload the dependency package

# mkdir -p /u01/setup/package

sftp> cd /u01/setup/package

sftp> lcd F:\package

sftp> put compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm

# cd /u01/setup/package

# rpm -ivh compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm

 

Check the dependencies

# rpm -q bc \

binutils \

compat-libcap1 \

compat-libstdc++-33 \

gcc \

gcc-c++ \

elfutils-libelf \

elfutils-libelf-devel \

glibc \

glibc-devel \

ksh \

libaio \

libaio-devel \

libgcc \

libstdc++ \

libstdc++-devel \

libxcb \

libX11 \

libXau \

libXi \

libXtst \

libXrender \

libXrender-devel \

make \

net-tools \

nfs-utils \

smartmontools \

sysstat \

e2fsprogs \

e2fsprogs-libs \

fontconfig-devel \

expect \

unzip \

openssh-clients \

readline \

tigervnc \

psmisc \

iotop | grep "not installed"

 

3.3、Edit the hosts File

Modify it as follows.

[root@cbdps01 ~]# vi /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

Append the following:

#Public IP

192.168.133.245 cbdps01.cqupt.com cbdps01

192.168.133.175 cbdps02.cqupt.com cbdps02

 

#Private IP

192.168.78.245 cbdps01-priv.cqupt.com cbdps01-priv

192.168.78.175 cbdps02-priv.cqupt.com cbdps02-priv

 

#Virtual IP

192.168.133.246 cbdps01-vip.cqupt.com cbdps01-vip

192.168.133.176 cbdps02-vip.cqupt.com cbdps02-vip

 

#Scan IP

192.168.133.247 cbdps-scan.cqupt.com cbdps-scan

 

#storage

192.168.133.201 leo-openfiler.cqupt.com leo-openfiler

 

#DNS server

192.168.133.200 leo-mydns.cqupt.com leo-mydns

 

Note: at this point the public and private addresses should be pingable, while the VIP and SCAN addresses should not respond yet; that is the expected state, since they are brought online later by Grid Infrastructure.
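
A quick way to verify that expectation from either node; a convenience one-liner where the public and private names should report up and the VIP/SCAN names down:

# for h in cbdps01 cbdps02 cbdps01-priv cbdps02-priv cbdps01-vip cbdps02-vip cbdps-scan; do ping -c 1 -W 1 $h >/dev/null 2>&1 && echo "$h up" || echo "$h down"; done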

 

3.4、Firewall Configuration

# systemctl status firewalld.service

# systemctl stop firewalld.service

# systemctl disable firewalld.service

 

3.5、Disable SELinux

Set SELINUX to disabled:

[root@cbdps01 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

[root@cbdps01 ~]# cat /etc/selinux/config

 

# This file controls the state of SELinux on the system.

# SELINUX= can take one of these three values:

#     enforcing - SELinux security policy is enforced.

#     permissive - SELinux prints warnings instead of enforcing.

#     disabled - No SELinux policy is loaded.

SELINUX=disabled

# SELINUXTYPE= can take one of three values:

#     targeted - Targeted processes are protected,

#     minimum - Modification of targeted policy. Only selected processes are protected.

#     mls - Multi Level Security protection.

SELINUXTYPE=targeted

 

Note: a host reboot is required for this to take effect.
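
After the reboot, the effective state can be confirmed; getenforce should print Disabled, and sestatus should agree:

# getenforce

# sestatus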

 

3.6、Time Synchronization

Starting with Oracle 11gR2 RAC, the Cluster Time Synchronization Service (CTSS) synchronizes the nodes' clocks. NTP and chrony are therefore disabled here, and Oracle will start the ctssd process automatically.

[root@cbdps01 ~]# systemctl stop ntpd

[root@cbdps01 ~]# systemctl disable ntpd.service

[root@cbdps01 ~]# mv /etc/ntp.conf /etc/ntp.conf.bak

[root@cbdps01 ~]# systemctl disable chronyd

[root@cbdps01 ~]# systemctl stop chronyd

[root@cbdps01 ~]# mv /etc/chrony.conf /etc/chrony.conf_bak
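
Once Grid Infrastructure has been installed, the CTSS state can be verified with crsctl; with the NTP/chrony configuration files moved aside as above, it should report active mode (shown here for reference; the command only works after GI is in place):

# crsctl check ctss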

 

3.7、avahi-daemon and NetworkManager Configuration

[root@cbdps01 ~]# yum install -y avahi*

[root@cbdps01 ~]# systemctl stop avahi-daemon.socket

[root@cbdps01 ~]# systemctl stop avahi-daemon.service

[root@cbdps01 ~]# pgrep -f avahi-daemon | awk '{print "kill -9 "$1}'

[root@cbdps01 ~]# systemctl disable avahi-daemon.socket

[root@cbdps01 ~]# systemctl disable avahi-daemon.service

[root@cbdps01 ~]# systemctl status avahi-daemon

3.8、Configure NOZEROCONF

[root@cbdps01 ~]# cat <<EOF>>/etc/sysconfig/network

NOZEROCONF=yes

EOF

 

3.9、Configure Kernel Parameters

Append to /etc/sysctl.conf:

# cat <<EOF>>/etc/sysctl.conf

fs.aio-max-nr = 4194304

fs.file-max = 6815744

kernel.shmall = 1980560

kernel.shmmax = 6489899008

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default = 262144

net.core.rmem_max = 4194304

net.core.wmem_default = 262144

net.core.wmem_max = 1048576

net.ipv4.ipfrag_high_thresh = 16777216

net.ipv4.ipfrag_low_thresh = 15728640

kernel.randomize_va_space = 0

vm.swappiness = 10

vm.min_free_kbytes = 524288

kernel.panic_on_oops = 1

net.ipv4.conf.bond0.rp_filter = 1

net.ipv4.conf.bond1.rp_filter = 2

EOF

 

# /sbin/sysctl -p
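
To spot-check that the new values are live (each should match the file above):

# sysctl -n kernel.shmmax kernel.shmall fs.aio-max-nr

# sysctl net.ipv4.ip_local_port_range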

 

3.10、Create Users, Groups, and Directories

Notes:

A. Before creating the users and groups, confirm that gid/uid 1000 is not already in use.

B. Some production databases do not create the oper group.

groupadd -g 1000 oinstall

groupadd -g 1001 dba

groupadd -g 1002 oper

groupadd -g 1010 asmadmin

groupadd -g 1011 asmdba

groupadd -g 1012 asmoper

useradd -u 1000 -g oinstall -G dba,oper,asmdba  -m -d /home/oracle oracle

useradd -u 1001 -g oinstall -G asmadmin,asmdba,asmoper,dba,oper -m -d /home/grid grid

echo "oracle4U"| passwd --stdin oracle

echo "grid4U"| passwd --stdin grid

 

usermod -a -G oinstall oracle

usermod -a -G oinstall grid

 

mkdir -p /u01/app/oracle

mkdir -p /u01/app/oraInventory

mkdir -p /u01/app/12.2.0.1/grid

mkdir -p /u01/app/grid

mkdir -p /u01/app/oracle/product/12.2.0.1/db_1

chown -R grid:oinstall /u01

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01
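
A quick sanity check that the IDs, supplementary groups, and directory ownership came out as intended:

# id grid

# id oracle

# ls -ld /u01 /u01/app/oracle /u01/app/12.2.0.1/grid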

 

Note: the two usermod commands above are the updated part; without them the RAC pre-install check reports the following warning.

PRVG-10467 : The default Oracle Inventory group could not be determined.

 

3.11、Resource Limits

Configure limits.conf:

[root@cbdps01 ~]# cat <<EOF>>/etc/security/limits.conf

grid soft core 0

grid hard core 0

grid soft nproc 400000

grid hard nproc 400000

grid soft memlock 711656100

grid hard memlock 711656100

grid soft nofile 400000

grid hard nofile 400000

grid soft stack 10240

grid hard stack 32768

 

oracle soft core 0

oracle hard core 0

oracle soft nproc 400000

oracle hard nproc 400000

oracle soft memlock unlimited

oracle hard memlock unlimited

oracle soft nofile 400000

oracle hard nofile 400000

oracle soft stack  10240

oracle hard stack  32768

EOF

 

Note: the stack limit entries above are the updated part; without them the RAC pre-install check reports the following warning.

a. "Soft Limit: maximum stack size" could not be fixed on nodes "cbdps02,cjtpssb01"

 

3.12、Add the pam_limits.so Module

Edit /etc/pam.d/login:

[root@cbdps01 ~]# cat <<EOF>>/etc/pam.d/login

session required pam_limits.so

session required /lib64/security/pam_limits.so

EOF
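
To confirm the limits are applied to a fresh login session (the soft/hard stack values should come from limits.conf above; note that nofile and nproc may later be overridden by the ulimit calls added to the user profiles in section 3.13):

# su - grid -c "ulimit -n; ulimit -u; ulimit -Ss; ulimit -Hs"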

 

3.13、Environment Variables

grid user:

$ cat <<EOF>>/home/grid/.bash_profile

# Oracle Grid 12c Environment

export TEMP=/tmp

export TMPDIR=\$TEMP

export ORACLE_SID=+ASM1

export ORACLE_BASE=/u01/app/grid

export ORACLE_HOME=/u01/app/12.2.0.1/grid

export LIBPATH=\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32:.

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/jdk/jre/lib:\$ORACLE_HOME/network/lib:\$ORACLE_HOME/rdbms/lib

export CLASSPATH=\$ORACLE_HOME/jre:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib:\$ORACLE_HOME/network/jlib

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$HOME/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:.

 

umask 022

if [ \$USER = "grid" ]; then

if [ \$SHELL = "/bin/ksh" ]; then

ulimit -p 16384

ulimit -n 65536

else

ulimit -u 16384 -n 65536

fi

fi

alias sas='sqlplus / as sysasm'

#stty erase ^H

EOF

 

$ source .bash_profile

 

Note: on node 2 the SID is +ASM2.

 

oracle user:

$ cat <<EOF>>/home/oracle/.bash_profile

# Oracle 12c oracle Environment

export TEMP=/tmp

export TMPDIR=\$TEMP

export ORACLE_SID=cbdps1

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=/u01/app/oracle/product/12.2.0.1/db_1

#export NLS_LANG=AMERICAN_AMERICA.AL32UTF8

export LIBPATH=\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/jdk/jre/lib:\$ORACLE_HOME/network/lib:\$ORACLE_HOME/rdbms/lib

export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$ORACLE_HOME/lib:\$ORACLE_HOME/lib32

export CLASSPATH=\$ORACLE_HOME/jre:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib:\$ORACLE_HOME/network/jlib

export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$HOME/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:.

 

umask 022

if [ \$USER = "oracle" ]; then

if [ \$SHELL = "/bin/ksh" ]; then

ulimit -p 16384

ulimit -n 65536

else

ulimit -u 16384 -n 65536

fi

fi

alias sas='sqlplus / as sysdba'

#stty erase ^H

EOF

 

$ source .bash_profile

 

Note: on node 2 the SID is cbdps2.

 

3.14、Configure CRSCTL for root

Allow root to invoke the GRID commands:

[root@cbdps01 /]# cat >> /etc/profile <<EOF

export PATH=/u01/app/12.2.0.1/grid/bin:\$PATH

EOF

 

3.15、Edit /etc/profile

# cat <<EOF>>/etc/profile

    if [ \$USER = "oracle" ] || [ \$USER = "grid" ]; then

            if [ \$SHELL = "/bin/ksh" ]; then

                  ulimit -p 16384

                  ulimit -n 65536

            else

                  ulimit -u 16384 -n 65536

            fi

            umask 022

    fi

EOF

 

# source /etc/profile

 

3.16、Huge Pages and NUMA

[root@cbdps01 ~]# sed -i 's/quiet/quiet transparent_hugepage=never numa=off/' /etc/default/grub

[root@cbdps01 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg

 

Enable huge pages

[root@cbdps01 ~]# vi /etc/sysctl.conf

This test environment has only 8 GB of RAM, so huge pages are not strictly required, but they are enabled here for testing purposes. Add the following:

vm.nr_hugepages=2800

 

For hosts with more than 32 GB of RAM, consider configuring huge pages:

vm.nr_hugepages >= SGA_Target / Hugepagesize (2 MB)

For example: with SGA = X GB, vm.nr_hugepages = (X+2)*1024/2

grep HugePages /proc/meminfo

 

Note: for this test the SGA is sized at 3700 MB.
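
Worked out for this environment (an illustrative calculation):

-- strict minimum: 3700 MB / 2 MB per page = 1850 pages

-- configured here: vm.nr_hugepages = 2800, i.e. 2800 * 2 MB = 5600 MB reserved, leaving headroom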

 

The following is the state after a reboot:

[root@cbdps01 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

[root@cbdps01 ~]# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-5.4.17-2011.6.2.el7uek.x86_64 root=/dev/mapper/ol-root ro crashkernel=auto rd.lvm.lv=ol/root rd.lvm.lv=ol/swap rhgb quiet transparent_hugepage=never numa=off

 

[root@cbdps01 ~]# cat /proc/meminfo | grep HugePages

AnonHugePages:         0 kB

HugePages_Total:    2800

HugePages_Free:     2800

HugePages_Rsvd:        0

HugePages_Surp:        0

 

3.17、Change the MTU of the lo Interface

Check the MTU of the lo interface; if it is 65536, change it to 16436.

See: ORA-27301: OS Failure Message: No Buffer Space Available / ORA-27302: failure occurred at: sskgxpsnd2 Source Script (Doc ID 2322410.1)

--Takes effect immediately:

[root@cbdps01 ~]# ifconfig lo mtu 16436

--Edit the configuration file so the change persists

[root@cbdps01 ~]# vi /etc/sysconfig/network-scripts/ifcfg-lo

Change 65536 to 16436

--Add the line if it is not present

MTU=16436

 

Note: confirm the MTU value again after the system is rebooted.
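
Both the running value and the persisted setting can be checked with (a quick verification):

[root@cbdps01 ~]# ip link show lo | grep mtu

[root@cbdps01 ~]# grep -i mtu /etc/sysconfig/network-scripts/ifcfg-lo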

 

3.18、Client DNS Resolution

3.18.1、Install the DNS Service

[root@cbdps01 ~]# yum install -y bind*

[root@cbdps01 ~]# systemctl start named

[root@cbdps01 ~]# systemctl status named

● named.service - Berkeley Internet Name Domain (DNS)

   Loaded: loaded (/usr/lib/systemd/system/named.service; disabled; vendor preset: disabled)

   Active: active (running) since Sun 2025-03-09 18:43:41 CST; 2s ago

  Process: 8475 ExecStart=/usr/sbin/named -u named -c ${NAMEDCONF} $OPTIONS (code=exited, status=0/SUCCESS)

  Process: 8472 ExecStartPre=/bin/bash -c if [ ! "$DISABLE_ZONE_CHECKING" == "yes" ]; then /usr/sbin/named-checkconf -z "$NAMEDCONF"; else echo "Checking of zone files is disabled"; fi (code=exited, status=0/SUCCESS)

 Main PID: 8477 (named)

   CGroup: /system.slice/named.service

           └─8477 /usr/sbin/named -u named -c /etc/named.conf

 

Mar 09 18:43:42 cbdps01 named[8477]: network unreachable resolving './DNSKEY/IN': 2001:500:2::c#53

Mar 09 18:43:42 cbdps01 named[8477]: network unreachable resolving './DNSKEY/IN': 2001:7fe::53#53

Mar 09 18:43:42 cbdps01 named[8477]: network unreachable resolving './DNSKEY/IN': 2001:500:2d::d#53

Mar 09 18:43:42 cbdps01 named[8477]: resolver priming query complete

Mar 09 18:43:42 cbdps01 named[8477]: checkhints: b.root-servers.net/A (170.247.170.2) missing from hints

Mar 09 18:43:42 cbdps01 named[8477]: checkhints: b.root-servers.net/A (199.9.14.201) extra record in hints

Mar 09 18:43:42 cbdps01 named[8477]: checkhints: b.root-servers.net/AAAA (2801:1b8:10::b) missing from hints

Mar 09 18:43:42 cbdps01 named[8477]: checkhints: b.root-servers.net/AAAA (2001:500:200::b) extra record in hints

Mar 09 18:43:42 cbdps01 named[8477]: managed-keys-zone: Key 20326 for zone . acceptance timer complete: key now trusted

Mar 09 18:43:42 cbdps01 named[8477]: managed-keys-zone: Initializing automatic trust anchor management for zone '.'; DNSKEY ID 38696 is now trusted, waiving the normal 30-day waiting period.

Hint: Some lines were ellipsized, use -l to show in full.

 

3.18.2、Enable the DNS Service at Boot

[root@cbdps01 ~]# systemctl is-enabled named

disabled

[root@cbdps01 ~]# systemctl enable named

Created symlink from /etc/systemd/system/multi-user.target.wants/named.service to /usr/lib/systemd/system/named.service.

[root@cbdps01 ~]# systemctl is-enabled named

enabled

 

3.18.3、Configure DNS Resolution

Configure cbdps01 as a client

[root@cbdps01 ~]# vi /etc/resolv.conf

# Generated by NetworkManager

options attempts: 2

options timeout: 1

search cqupt.com

nameserver 192.168.133.200

 

3.18.4、DNS Resolution Test

[root@cbdps01 ~]# nslookup cbdps-scan

Server:         192.168.133.200

Address:        192.168.133.200#53

 

Name:   cbdps-scan.cqupt.com

Address: 192.168.133.247

 

[root@cbdps01 ~]# nslookup 192.168.133.247

247.133.168.192.IN-ADDR.ARPA    name = cbdps-scan.cqupt.com.

 

Note: as shown above, client-side DNS resolution works.

 

4、Storage Configuration

Shared storage can be provided by third-party software, by VMware Workstation disk sharing, or by an iSCSI network storage service. This guide simulates shared storage with iSCSI. Common iSCSI storage appliances include Openfiler and StarWind; Openfiler is used here.

4.1、Openfiler Installation

Note: one 40 GB disk is created for CRS, four 10 GB disks for DATA, and two 10 GB disks for archiving.

Note: select "Yes" for all 7 disks.

--Choose "create custom layout"; otherwise all disks will be used

Create the /boot partition.

Create swap.

Create the /, /boot, and swap partitions as above, then click "Next".

Note: once Openfiler is installed, open https://192.168.133.201:446 in a browser; Firefox is recommended.

 

4.2、Browser Access

Notes:

a. Username: openfiler

b. Password: password

On the Services page, enable the iSCSI-related services.

4.3、Storage Layout

4.3.1、Non-RAID Layout

The workflow is PV -> VG -> LV. Select a block device; if there are several, repeat for each.

Select one of the disks and create a PV on it.

Note: create PVs on /dev/sdb, sdc, sdd, sde, sdf, sdg, and sdh in turn.

 

4.3.2、Create the VG

Enter a name for the VG; here several PVs are selected to form one VG.

Note: after selecting the disks and entering the VG name, click "Add volume group".

The newly created VG is then listed in the web UI.

4.3.3、Create the LVs

Select the VG, click "change", and create 7 LVs with the planned sizes; set the type to "block" so they can serve as Oracle shared disks.

Repeat the steps above to add the remaining 6 disks.

4.4、Create the Target

Click "Add" to create the iSCSI target that clients will discover, then click "Update" to save the target settings.

4.5、LUN Mapping

Under LUN Mapping, click "Map" to map each LUN to the iSCSI target.

The completed LUN mapping is then shown in the UI.

4.6、Configure Network Access

For clients to connect to the iSCSI storage, add their IP addresses in the network access configuration.

Note: enter the IPs that the two RAC nodes use to reach the storage server.

4.7、Allow the Network Segment

Set the clients' access to "allow" and click "update"; otherwise the clients cannot discover the storage.

4.8、Client iSCSI Login

Note: run on both nodes.

Check whether the iSCSI initiator is installed

[root@cbdps01 ~]# rpm -qa | grep iscsi

iscsi-initiator-utils-iscsiuio-6.2.0.874-19.el7.x86_64

iscsi-initiator-utils-6.2.0.874-19.el7.x86_64

libiscsi-1.9.0-7.el7.x86_64

 

[root@cbdps02 ~]# rpm -qa | grep iscsi

iscsi-initiator-utils-iscsiuio-6.2.0.874-19.el7.x86_64

iscsi-initiator-utils-6.2.0.874-19.el7.x86_64

libiscsi-1.9.0-7.el7.x86_64

 

Note: if it is missing, install it with:

# yum install -y iscsi-initiator-utils*

 

Start iscsi and enable it as a system service

[root@cbdps01 ~]# lsblk

NAME          MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda             8:0    0  100G  0 disk

├─sda1          8:1    0    1G  0 part /boot

└─sda2          8:2    0   99G  0 part

  ├─rhel-root 253:0    0   91G  0 lvm  /

  └─rhel-swap 253:1    0    8G  0 lvm  [SWAP]

sr0            11:0    1  4.2G  0 rom  /mnt

 

[root@cbdps01 ~]# iscsiadm -m discovery -t st -p 192.168.133.201

192.168.133.201:3260,1 iqn.2006-01.com.openfiler:tsn.8380820283ea

 

Note: 192.168.133.201 is the IP of the Openfiler storage server.

 

[root@cbdps01 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.8380820283ea -p 192.168.133.201 -l

Logging in to [iface: default, target: iqn.2006-01.com.openfiler:tsn.8380820283ea, portal: 192.168.133.201,3260] (multiple)

Login to [iface: default, target: iqn.2006-01.com.openfiler:tsn.8380820283ea, portal: 192.168.133.201,3260] successful.

 

[root@cbdps01 ~]# lsblk -p

NAME                      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

/dev/sda                    8:0    0  100G  0 disk

├─/dev/sda1                 8:1    0    1G  0 part /boot

└─/dev/sda2                 8:2    0   99G  0 part

  ├─/dev/mapper/rhel-root 253:0    0   91G  0 lvm  /

  └─/dev/mapper/rhel-swap 253:1    0    8G  0 lvm  [SWAP]

/dev/sdb                    8:16   0   40G  0 disk

/dev/sdc                    8:32   0  9.1G  0 disk

/dev/sdd                    8:48   0  9.1G  0 disk

/dev/sde                    8:64   0  9.1G  0 disk

/dev/sdf                    8:80   0  9.1G  0 disk

/dev/sdg                    8:96   0  9.1G  0 disk

/dev/sdh                    8:112  0  9.1G  0 disk

/dev/sr0                   11:0    1  4.2G  0 rom  /mnt

 

Notes:

a. /dev/sdb through /dev/sdh above are the newly attached shared LUNs;

b. the iscsi service is not running here, although in theory both the iscsi and iscsid services should be active.

 

[root@cbdps01 ~]# systemctl status iscsi

● iscsi.service - Login and scanning of iSCSI devices

   Loaded: loaded (/usr/lib/systemd/system/iscsi.service; enabled; vendor preset: disabled)

   Active: inactive (dead)

Condition: start condition failed at Sun 2025-03-09 17:55:42 CST; 2h 33min ago

     Docs: man:iscsiadm(8)

           man:iscsid(8)

 

Mar 09 17:55:43 cjtpssb01 systemd[1]: Unit iscsi.service cannot be reloaded because it is inactive.

Mar 09 17:55:43 cjtpssb01 systemd[1]: Unit iscsi.service cannot be reloaded because it is inactive.

 

[root@cbdps01 ~]# systemctl status iscsid

● iscsid.service - Open-iSCSI

   Loaded: loaded (/usr/lib/systemd/system/iscsid.service; disabled; vendor preset: disabled)

   Active: active (running) since Sun 2025-03-09 20:26:16 CST; 3min 7s ago

     Docs: man:iscsid(8)

           man:iscsiuio(8)

           man:iscsiadm(8)

 Main PID: 9819 (iscsid)

   Status: "Ready to process requests"

   CGroup: /system.slice/iscsid.service

           └─9819 /sbin/iscsid -f

 

Mar 09 20:26:16 cbdps01 systemd[1]: Starting Open-iSCSI...

Mar 09 20:26:16 cbdps01 systemd[1]: Started Open-iSCSI.

Mar 09 20:27:53 cbdps01 iscsid[9819]: iscsid: Could not set session1 priority. READ/WRITE throughout and latency could be affected.

Mar 09 20:27:53 cbdps01 iscsid[9819]: iscsid: Connection1:0 to [target: iqn.2006-01.com.openfiler:tsn.8380820283ea, portal: 192.168.133.201,3260] through [iface: default] is operational now

 

[root@cbdps01 ~]# systemctl is-enabled iscsid

disabled

[root@cbdps01 ~]# systemctl is-enabled iscsi

enabled

[root@cbdps01 ~]# systemctl enable iscsid 

 

4.9、multipath and udev Device Binding

Note: perform 4.9.1 through 4.9.5 on both nodes.

4.9.1、Install multipath

[root@cbdps01 ~]# yum -y install device-mapper*

[root@cbdps01 ~]# mpathconf --enable --with_multipathd y

[root@cbdps01 ~]# systemctl status multipathd

● multipathd.service - Device-Mapper Multipath Device Controller

   Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; vendor preset: enabled)

   Active: active (running) since Sun 2025-03-09 20:33:28 CST; 9s ago

  Process: 10027 ExecStart=/sbin/multipathd (code=exited, status=0/SUCCESS)

  Process: 10024 ExecStartPre=/sbin/multipath -A (code=exited, status=0/SUCCESS)

  Process: 10019 ExecStartPre=/sbin/modprobe dm-multipath (code=exited, status=0/SUCCESS)

 Main PID: 10030 (multipathd)

   CGroup: /system.slice/multipathd.service

           └─10030 /sbin/multipathd

 

Mar 09 20:33:28 cbdps01 systemd[1]: Starting Device-Mapper Multipath Device Controller...

Mar 09 20:33:28 cbdps01 systemd[1]: Started Device-Mapper Multipath Device Controller.

Mar 09 20:33:28 cbdps01 multipathd[10030]: path checkers start up

 

[root@cbdps01 ~]# systemctl is-enabled multipathd

enabled

 

4.9.2、Collect the WWIDs of the Shared Disks

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdb

14f504e46494c45526572636a566d2d797670472d316c5654

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdc

14f504e46494c4552344366577a4c2d417932672d32533554

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdd

14f504e46494c4552766e554e65362d5469554d2d4b6e4b66

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sde

14f504e46494c45527a31674e43472d59596d522d526b676d

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdf

14f504e46494c4552626f635a757a2d414447542d4d354170

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdg

14f504e46494c45526d4e3649664a2d6c4752792d4471766f

[root@cbdps01 ~]# /usr/lib/udev/scsi_id -g -u /dev/sdh

14f504e46494c45524d43396b57742d6a3567412d55793037
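
The same values can be collected in one pass (a convenience loop over the device names above):

[root@cbdps01 ~]# for d in sdb sdc sdd sde sdf sdg sdh; do echo -n "$d: "; /usr/lib/udev/scsi_id -g -u /dev/$d; done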

 

4.9.3、Configure multipath

The wwid values are those collected above and the alias names are user-defined; here there are 1 OCR disk, 4 DATA disks, and 2 FRA disks.

[root@cbdps01 ~]# cat <<EOF>/etc/multipath.conf

defaults {

    user_friendly_names yes

}

 

blacklist {

  devnode "^sda"

}

 

multipaths {

  multipath {

  wwid "14f504e46494c45526572636a566d2d797670472d316c5654"

  alias vote

  }

  multipath {

  wwid "14f504e46494c4552344366577a4c2d417932672d32533554"

  alias data01

  }

  multipath {

  wwid "14f504e46494c4552766e554e65362d5469554d2d4b6e4b66"

  alias data02

  }

  multipath {

  wwid "14f504e46494c45527a31674e43472d59596d522d526b676d"

  alias data03

  }

  multipath {

  wwid "14f504e46494c4552626f635a757a2d414447542d4d354170"

  alias data04

  }

  multipath {

  wwid "14f504e46494c45526d4e3649664a2d6c4752792d4471766f"

  alias fra01

  }

  multipath {

  wwid "14f504e46494c45524d43396b57742d6a3567412d55793037"

  alias fra02

  }

}

EOF

 

4.9.4、Activate the Multipath Devices

[root@cbdps01 ~]# multipath -F

[root@cbdps01 ~]# multipath -v2

create: vote (14f504e46494c45526572636a566d2d797670472d316c5654) undef OPNFILER,VIRTUAL-DISK   

size=40G features='0' hwhandler='0' wp=undef

`-+- policy='service-time 0' prio=1 status=undef

  `- 33:0:0:0 sdb 8:16  undef ready running

create: data01 (14f504e46494c4552344366577a4c2d417932672d32533554) undef OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=undef

`-+- policy='service-time 0' prio=1 status=undef

  `- 33:0:0:1 sdc 8:32  undef ready running

create: data02 (14f504e46494c4552766e554e65362d5469554d2d4b6e4b66) undef OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=undef

`-+- policy='service-time 0' prio=1 status=undef

  `- 33:0:0:2 sdd 8:48  undef ready running

create: data03 (14f504e46494c45527a31674e43472d59596d522d526b676d) undef OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=undef

`-+- policy='service-time 0' prio=1 status=undef

  `- 33:0:0:3 sde 8:64  undef ready running

create: data04 (14f504e46494c4552626f635a757a2d414447542d4d354170) undef OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=undef

`-+- policy='service-time 0' prio=1 status=undef

  `- 33:0:0:4 sdf 8:80  undef ready running

create: fra01 (14f504e46494c45526d4e3649664a2d6c4752792d4471766f) undef OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=undef

`-+- policy='service-time 0' prio=1 status=undef

  `- 33:0:0:5 sdg 8:96  undef ready running

create: fra02 (14f504e46494c45524d43396b57742d6a3567412d55793037) undef OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=undef

`-+- policy='service-time 0' prio=1 status=undef

  `- 33:0:0:6 sdh 8:112 undef ready running

[root@cbdps01 ~]# multipath -ll

data01 (14f504e46494c4552344366577a4c2d417932672d32533554) dm-3 OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 33:0:0:1 sdc 8:32  active ready running

fra02 (14f504e46494c45524d43396b57742d6a3567412d55793037) dm-8 OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 33:0:0:6 sdh 8:112 active ready running

fra01 (14f504e46494c45526d4e3649664a2d6c4752792d4471766f) dm-7 OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 33:0:0:5 sdg 8:96  active ready running

data04 (14f504e46494c4552626f635a757a2d414447542d4d354170) dm-6 OPNFILER,VIRTUAL-DISK    

size=9.1G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 33:0:0:4 sdf 8:80  active ready running

data03 (14f504e46494c45527a31674e43472d59596d522d526b676d) dm-5 OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 33:0:0:3 sde 8:64  active ready running

data02 (14f504e46494c4552766e554e65362d5469554d2d4b6e4b66) dm-4 OPNFILER,VIRTUAL-DISK   

size=9.1G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 33:0:0:2 sdd 8:48  active ready running

vote (14f504e46494c45526572636a566d2d797670472d316c5654) dm-2 OPNFILER,VIRTUAL-DISK   

size=40G features='0' hwhandler='0' wp=rw

`-+- policy='service-time 0' prio=1 status=active

  `- 33:0:0:0 sdb 8:16  active ready running

 

[root@cbdps01 ~]# lsblk -p

NAME                      MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT

/dev/sda                    8:0    0  100G  0 disk 

├─/dev/sda1                 8:1    0    1G  0 part  /boot

└─/dev/sda2                 8:2    0   99G  0 part 

  ├─/dev/mapper/rhel-root 253:0    0   91G  0 lvm   /

  └─/dev/mapper/rhel-swap 253:1    0    8G  0 lvm   [SWAP]

/dev/sdb                    8:16   0   40G  0 disk 

└─/dev/mapper/vote        253:2    0   40G  0 mpath

/dev/sdc                    8:32   0  9.1G  0 disk 

└─/dev/mapper/data01      253:3    0  9.1G  0 mpath

/dev/sdd                    8:48   0  9.1G  0 disk 

└─/dev/mapper/data02      253:4    0  9.1G  0 mpath

/dev/sde                    8:64   0  9.1G  0 disk 

└─/dev/mapper/data03      253:5    0  9.1G  0 mpath

/dev/sdf                    8:80   0  9.1G  0 disk 

└─/dev/mapper/data04      253:6    0  9.1G  0 mpath

/dev/sdg                    8:96   0  9.1G  0 disk 

└─/dev/mapper/fra01       253:7    0  9.1G  0 mpath

/dev/sdh                    8:112  0  9.1G  0 disk 

└─/dev/mapper/fra02       253:8    0  9.1G  0 mpath

/dev/sr0                   11:0    1  4.2G  0 rom   /mnt

 

[root@cbdps01 ~]# blkid

/dev/sda1: UUID="6c482088-cda9-4ccd-90ed-15f6a1eafb86" TYPE="xfs"

/dev/sda2: UUID="w7RjSJ-Ij88-wIu3-Nmpm-8h2S-ZlSV-dPsvhg" TYPE="LVM2_member"

/dev/sr0: UUID="2020-09-17-19-35-15-00" LABEL="RHEL-7.9 Server.x86_64" TYPE="iso9660" PTTYPE="dos"

/dev/mapper/rhel-root: UUID="c9559550-affa-419c-90b4-325949cdb029" TYPE="xfs"

/dev/mapper/rhel-swap: UUID="c0c998d2-24af-451a-9173-ffb87c112d9e" TYPE="swap"

 

4.9.5、Bind the Devices with udev

# vi /etc/udev/rules.d/99-oracle-asmdevices.rules

ENV{DM_NAME}=="vote", OWNER:="grid", GROUP:="asmadmin", MODE:="0660", SYMLINK+="asm-vote"

ENV{DM_NAME}=="data01", OWNER:="grid", GROUP:="asmadmin", MODE:="0660", SYMLINK+="asm-data01"

ENV{DM_NAME}=="data02", OWNER:="grid", GROUP:="asmadmin", MODE:="0660", SYMLINK+="asm-data02"

ENV{DM_NAME}=="data03", OWNER:="grid", GROUP:="asmadmin", MODE:="0660", SYMLINK+="asm-data03"

ENV{DM_NAME}=="data04", OWNER:="grid", GROUP:="asmadmin", MODE:="0660", SYMLINK+="asm-data04"

ENV{DM_NAME}=="fra01", OWNER:="grid", GROUP:="asmadmin", MODE:="0660", SYMLINK+="asm-fra01"

ENV{DM_NAME}=="fra02", OWNER:="grid", GROUP:="asmadmin", MODE:="0660", SYMLINK+="asm-fra02"

 

Reload the udev rules:

[root@cbdps01 ~]# /sbin/udevadm control --reload-rules

[root@cbdps01 ~]# /sbin/udevadm trigger --type=devices --action=change
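
To verify the bindings took effect, a quick check can be run (a suggested check, output not captured from this system): the /dev/asm-* symlinks should exist and the underlying /dev/dm-* devices should be owned grid:asmadmin with mode 0660.

[root@cbdps01 ~]# ls -l /dev/asm-*

[root@cbdps01 ~]# ls -l /dev/dm-*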

 

5、Preparing the installation media

5.1、Uploading the installation media

Note: this step is performed on node 1 only.

Create dedicated directories for the installation files

# mkdir -p /u01/setup/{db,grid}

 

-- Upload the files

sftp> lcd F:\installmedium\12c

sftp> cd /u01/setup/grid

sftp> put linuxx64_12201_grid_home.zip

sftp> cd /u01/setup/db

sftp> put linuxx64_12201_database.zip

 

5.2、Uploading the patches

Note: this step is performed on both nodes.

[root@cbdps01 ~]# mkdir -p /u01/setup/{RU,OJVM,oneoff_patch,OPatch}

sftp> cd /u01/setup/OPatch

sftp> lcd F:\installmedium\12c\OPatch\12.2.0.1.41

sftp> put p6880880_122010_Linux-x86-64.zip

sftp> cd /u01/setup/RU

sftp> lcd F:\installmedium\12c\RU

sftp> put p33583921_122010_Linux-x86-64.zip

sftp> cd /u01/setup/OJVM

sftp> put p33561275_122010_Linux-x86-64.zip

sftp> cd /u01/setup/oneoff_patch/

sftp> put p24921392_12201230117DBJAN2023RU_Linux-x86-64.zip

sftp> put p26878028_12201240416DBAPR2024RU_Linux-x86-64.zip

sftp> put p27873364_121020_Linux-x86-64.zip

sftp> put p27882764_122010_Linux-x86-64.zip

sftp> put p30666479_12201220118DBJAN2022RU_Linux-x86-64.zip

 

5.3、Unpacking the installation media

Note: this step is performed on node 1 only. Unlike earlier releases, the 12c R2 grid zip is the software image itself, so it must be unzipped directly into the Grid Home.

 

Grid installation package

# chown -R grid:oinstall /u01/setup/grid

# su - grid -c "unzip -q /u01/setup/grid/linuxx64_12201_grid_home.zip -d /u01/app/12.2.0.1/grid/"

 

Database installation package

# chown -R oracle:oinstall /u01/setup/db

# su - oracle -c "unzip -q /u01/setup/db/linuxx64_12201_database.zip -d /u01/setup/db"

 

5.4、Unpacking the patches

Note: only the grid user's OPatch on node 1 is upgraded here.

# chown -R grid:oinstall /u01/setup/OPatch

# su - grid -c "unzip -q -o /u01/setup/OPatch/p6880880_122010_Linux-x86-64.zip -d /u01/app/12.2.0.1/grid"

 

RU patch

# chown -R grid:oinstall /u01/setup/RU

# su - grid -c "unzip -q /u01/setup/RU/p33583921_122010_Linux-x86-64.zip -d /u01/setup/RU"

 

Check the OPatch version

[grid@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

Special note: after the GI installation completes, the grid user's OPatch on node 2 is automatically upgraded to 12.2.0.1.41, as shown below.

[grid@cbdps02 ~]$ opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

5.5、Installing cvuqdisk

-- Install the cvuqdisk RPM required by the Cluster Verification Utility

[root@cbdps01 ~]# cd /u01/app/12.2.0.1/grid/cv/rpm

[root@cbdps01 rpm]# CVUQDISK_GRP=oinstall; export CVUQDISK_GRP

[root@cbdps01 rpm]# rpm -ivh cvuqdisk-1.0.10-1.rpm

Preparing...                ########################################### [100%]

   1:cvuqdisk               ########################################### [100%]

[root@cbdps01 rpm]# scp cvuqdisk-1.0.10-1.rpm cbdps02:/tmp

[root@cbdps02 ~]# CVUQDISK_GRP=oinstall; export CVUQDISK_GRP

[root@cbdps02 ~]# rpm -ivh /tmp/cvuqdisk-1.0.10-1.rpm
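
A quick way to confirm the package landed on both nodes (a suggested check, not captured from this system):

[root@cbdps01 ~]# rpm -q cvuqdisk

[root@cbdps02 ~]# rpm -q cvuqdisk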

 

6、SSH equivalence between the RAC nodes

Note: run on node 1 only.

6.1、Configuring user equivalence

[root@cbdps01 ~]# su - grid

[grid@cbdps01 ~]$ cd $ORACLE_HOME/oui/prov/resources/scripts

[grid@cbdps01 scripts]$ ./sshUserSetup.sh -user grid -hosts "cbdps01 cbdps02" -advanced -noPromptPassphrase

[grid@cbdps01 scripts]$ ./sshUserSetup.sh -user oracle -hosts "cbdps01 cbdps02" -advanced -noPromptPassphrase

 

Note: the equivalence script can be run as either the grid or the root user.

 

6.2、Testing user equivalence

Note: test on both nodes.

grid user

[grid@cbdps01 ~]$ ssh cbdps01 date

Sun Mar  9 20:58:08 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02 date

Sun Mar  9 20:58:12 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02-priv date

The authenticity of host 'cbdps02-priv (192.168.78.175)' can't be established.

ECDSA key fingerprint is SHA256:MMev6BuaVKRwTUtB8bC7znz9ViYR41gCEezPAHY7izk.

ECDSA key fingerprint is MD5:83:e9:2f:88:0a:e4:6c:90:cc:31:2e:10:89:35:08:4d.

Are you sure you want to continue connecting (yes/no)? yes

Warning: Permanently added 'cbdps02-priv,192.168.78.175' (ECDSA) to the list of known hosts.

Sun Mar  9 20:58:17 CST 2025

[grid@cbdps01 ~]$ ssh cbdps02-priv date

Sun Mar  9 20:58:19 CST 2025

[grid@cbdps01 ~]$ ssh cbdps01-priv date

The authenticity of host 'cbdps01-priv (192.168.78.245)' can't be established.

ECDSA key fingerprint is SHA256:MMev6BuaVKRwTUtB8bC7znz9ViYR41gCEezPAHY7izk.

ECDSA key fingerprint is MD5:83:e9:2f:88:0a:e4:6c:90:cc:31:2e:10:89:35:08:4d.

Are you sure you want to continue connecting (yes/no)? yes

Warning: Permanently added 'cbdps01-priv,192.168.78.245' (ECDSA) to the list of known hosts.

Sun Mar  9 20:58:22 CST 2025

[grid@cbdps01 ~]$ ssh cbdps01-priv date

Sun Mar  9 20:58:23 CST 2025

 

oracle user

[oracle@cbdps01 ~]$ ssh cbdps01 date

Sun Mar  9 20:58:44 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02 date

Sun Mar  9 20:58:47 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02-priv date

The authenticity of host 'cbdps02-priv (192.168.78.175)' can't be established.

ECDSA key fingerprint is SHA256:MMev6BuaVKRwTUtB8bC7znz9ViYR41gCEezPAHY7izk.

ECDSA key fingerprint is MD5:83:e9:2f:88:0a:e4:6c:90:cc:31:2e:10:89:35:08:4d.

Are you sure you want to continue connecting (yes/no)? yes

Warning: Permanently added 'cbdps02-priv,192.168.78.175' (ECDSA) to the list of known hosts.

Sun Mar  9 20:58:51 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps02-priv date

Sun Mar  9 20:58:52 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps01-priv date

The authenticity of host 'cbdps01-priv (192.168.78.245)' can't be established.

ECDSA key fingerprint is SHA256:MMev6BuaVKRwTUtB8bC7znz9ViYR41gCEezPAHY7izk.

ECDSA key fingerprint is MD5:83:e9:2f:88:0a:e4:6c:90:cc:31:2e:10:89:35:08:4d.

Are you sure you want to continue connecting (yes/no)? yes

Warning: Permanently added 'cbdps01-priv,192.168.78.245' (ECDSA) to the list of known hosts.

Sun Mar  9 20:58:57 CST 2025

[oracle@cbdps01 ~]$ ssh cbdps01-priv date

Sun Mar  9 20:58:58 CST 2025

 

Note: the goal is that every connection completes without a 'yes' prompt.
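
A compact way to run this check (a sketch; execute as both grid and oracle on each node) is to loop over all four host names and confirm every call prints a date with no prompt:

$ for h in cbdps01 cbdps02 cbdps01-priv cbdps02-priv; do ssh $h date; done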

7、Checking SELinux, the I/O scheduler, transparent huge pages, huge pages, and NUMA

Note: after the system reboots, verify that SELinux and the other settings are still in effect.

The output below was captured after the reboot.

[root@cbdps02 ~]# getenforce

Disabled

[root@cbdps01 ~]# getenforce

Disabled

 

[root@cbdps02 ~]# cat /sys/block/sdb/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdc/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdd/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sde/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdf/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdg/queue/scheduler

noop [deadline] cfq

[root@cbdps02 ~]# cat /sys/block/sdh/queue/scheduler

noop [deadline] cfq
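
The deadline scheduler selected above is the usual recommendation for database disks. The /sys setting does not survive a reboot by itself; if it is not already the system default, one way to persist it is a udev rule (a sketch -- the sd[b-h] pattern assumes the shared-disk names stay stable):

# vi /etc/udev/rules.d/60-io-scheduler.rules

ACTION=="add|change", KERNEL=="sd[b-h]", ATTR{queue/scheduler}="deadline"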

 

[root@cbdps01 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

[root@cbdps02 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled

always madvise [never]

 

Check the huge pages configuration

[root@cbdps01 ~]# cat /proc/meminfo | grep HugePages

AnonHugePages:         0 kB

HugePages_Total:    2800

HugePages_Free:     2800

HugePages_Rsvd:        0

HugePages_Surp:        0

 

[root@cbdps02 ~]# cat /proc/meminfo | grep HugePages

AnonHugePages:         0 kB

HugePages_Total:    2800

HugePages_Free:     2800

HugePages_Rsvd:        0

HugePages_Surp:        0
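
For reference, 2800 pages at the default 2 MB page size come to 2800 x 2 MB = 5600 MB, so this pool can back an SGA of roughly 5.5 GB; the usual sizing rule is HugePages_Total >= SGA size / Hugepagesize (a hedged note, not taken from this build's sizing worksheet). The page size can be confirmed with:

$ grep Hugepagesize /proc/meminfo

Hugepagesize:       2048 kB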

 

[root@cbdps01 ~]# ifconfig | grep -i mtu | grep -i lo

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 16436

 

[root@cbdps01 ~]# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-3.10.0-1160.el7.x86_64 root=/dev/mapper/rhel-root ro crashkernel=auto spectre_v2=retpoline rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet transparent_hugepage=never numa=off

 

[root@cbdps02 ~]# cat /proc/cmdline

BOOT_IMAGE=/vmlinuz-3.10.0-1160.el7.x86_64 root=/dev/mapper/rhel-root ro crashkernel=auto spectre_v2=retpoline rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet transparent_hugepage=never numa=off

 

8、Installing Grid via the GUI

Note: a system reboot is recommended before installing; the GUI-based GI installation runs on node 1 only.

[grid@cbdps01 ~]$ cd $ORACLE_HOME

[grid@cbdps01 grid]$ export DISPLAY=192.168.133.1:0.0

[grid@cbdps01 grid]$ ./gridSetup.sh -applyPSU /u01/setup/RU/33583921

 

Notes:

a、Although RU 33583921 is applied here on node 1 only, node 2 automatically receives the same RU once the GI installation succeeds.

b、Applying the RU up front prevents the following two errors.

Error raised during the pre-installation checks

1)PRVG-0802 : Storage type for path "/dev/asm-vote01" could not be determined on node "cjtpssb01". PRVG-0801 : invalid internal command tags- Cause:An error occurred while attempting to determine the storage type of the indicated path. Accompanying messages provide further details.- Action:Resolve the issues described in any accompanying messages and retry.


 

Error raised while running the root.sh script

2)The command '/u01/app/12.2.0.1/grid/perl/bin/perl -I/u01/app/12.2.0.1/grid/perl/lib -I/u01/app/12.2.0.1/grid/crs/install /u01/app/12.2.0.1/grid/crs/install/rootcrs.pl ' execution failed

 

Preparing the home to patch...

Applying the patch /u01/setup/RU/33583921/33116894/...

Successfully applied the patch.

The log can be found at: /tmp/GridSetupActions2025-02-22_03-16-13PM/installerPatchActions_2025-02-22_03-16-13PM.log

Launching Oracle Grid Infrastructure Setup Wizard...

 

Select the cluster installation option.

After adding node 2's public and virtual hostnames, click "SSH connectivity" and enter the grid user's password (the user must have been created with the same password on both nodes). Run Setup first, then Test, to establish equivalence.

The following screen appears after clicking "Setup".

The following screen appears after clicking "Test".

Just make sure each NIC is mapped to its proper subnet; in 12c the interconnect network defaults to "ASM & Private", carrying traffic for the ASM instances as well.

Create the OCR voting disk group. Note: the "High" redundancy level requires at least five disks.

Note: because DNS resolution was configured for this installation, the warning shown in earlier attempts (screenshot) did not appear this time.

Run on node 1:

# /u01/app/oraInventory/orainstRoot.sh

Run on node 2:

# /u01/app/oraInventory/orainstRoot.sh

 

Run on node 1:

# /u01/app/12.2.0.1/grid/root.sh

Run on node 2:

# /u01/app/12.2.0.1/grid/root.sh

 

Note: after the root.sh script runs, the ownership of /u01 changes to the following.

drwxr-xr-x.   4 root oinstall  4096 Feb  5 11:00 u01

 

Attributes before the root.sh script was run:

drwxrwxr-x.   3 grid oinstall  4096 Feb  5 10:28 u01

 

Note: inspecting the four nodes of a core production database confirmed that /u01 is owned root:oinstall there as well, so root.sh really does change the directory's ownership. A permission problem on this directory later caused patching to fail repeatedly, which is why it is called out here.

Execution log

Node 1

[root@cbdps01 ~]# /u01/app/oraInventory/orainstRoot.sh

Changing permissions of /u01/app/oraInventory.

Adding read,write permissions for group.

Removing read,write,execute permissions for world.

 

Changing groupname of /u01/app/oraInventory to oinstall.

The execution of the script is complete.

[root@cbdps01 ~]# /u01/app/12.2.0.1/grid/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= grid

    ORACLE_HOME=  /u01/app/12.2.0.1/grid

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

   Copying dbhome to /usr/local/bin ...

   Copying oraenv to /usr/local/bin ...

   Copying coraenv to /usr/local/bin ...

 

 

Creating /etc/oratab file...

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Relinking oracle with rac_on option

Using configuration parameter file: /u01/app/12.2.0.1/grid/crs/install/crsconfig_params

The log of current session can be found at:

  /u01/app/grid/crsdata/cbdps01/crsconfig/rootcrs_cbdps01_2025-03-09_09-48-18PM.log

2025/03/09 21:48:21 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.

2025/03/09 21:48:21 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.

2025/03/09 21:49:20 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.

2025/03/09 21:49:20 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.

2025/03/09 21:49:25 CLSRSC-363: User ignored prerequisites during installation

2025/03/09 21:49:25 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.

2025/03/09 21:49:27 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.

2025/03/09 21:49:28 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.

2025/03/09 21:49:34 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.

2025/03/09 21:49:52 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.

2025/03/09 21:49:52 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.

2025/03/09 21:50:15 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.

2025/03/09 21:50:22 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.

2025/03/09 21:50:22 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.

2025/03/09 21:50:27 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.

2025/03/09 21:50:43 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'

2025/03/09 21:51:19 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.

2025/03/09 21:51:24 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

2025/03/09 21:52:29 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.

2025/03/09 21:52:34 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps01'

CRS-2676: Start of 'ora.evmd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps01'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps01'

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps01'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps01'

CRS-2676: Start of 'ora.diskmon' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps01' succeeded

 

Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-250309PM095307.log for details.

 

 

2025/03/09 21:53:47 CLSRSC-482: Running command: '/u01/app/12.2.0.1/grid/bin/ocrconfig -upgrade grid oinstall'

CRS-2672: Attempting to start 'ora.crf' on 'cbdps01'

CRS-2672: Attempting to start 'ora.storage' on 'cbdps01'

CRS-2676: Start of 'ora.storage' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.crf' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps01'

CRS-2676: Start of 'ora.crsd' on 'cbdps01' succeeded

CRS-4256: Updating the profile

Successful addition of voting disk c765b4e198e74f22bfdc62827686036a.

Successfully replaced voting disk group with +OCR.

CRS-4256: Updating the profile

CRS-4266: Voting file(s) successfully replaced

##  STATE    File Universal Id                File Name Disk group

--  -----    -----------------                --------- ---------

 1. ONLINE   c765b4e198e74f22bfdc62827686036a (/dev/asm-vote) [OCR]

Located 1 voting disk(s).

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps01'

CRS-2673: Attempting to stop 'ora.crsd' on 'cbdps01'

CRS-2677: Stop of 'ora.crsd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.storage' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.crf' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.gpnpd' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.mdnsd' on 'cbdps01'

CRS-2677: Stop of 'ora.drivers.acfs' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.crf' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.storage' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.asm' on 'cbdps01'

CRS-2677: Stop of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.asm' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'cbdps01'

CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.ctssd' on 'cbdps01'

CRS-2673: Attempting to stop 'ora.evmd' on 'cbdps01'

CRS-2677: Stop of 'ora.ctssd' on 'cbdps01' succeeded

CRS-2677: Stop of 'ora.evmd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.cssd' on 'cbdps01'

CRS-2677: Stop of 'ora.cssd' on 'cbdps01' succeeded

CRS-2673: Attempting to stop 'ora.gipcd' on 'cbdps01'

CRS-2677: Stop of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps01' has completed

CRS-4133: Oracle High Availability Services has been stopped.

2025/03/09 21:55:28 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.

CRS-4123: Starting Oracle High Availability Services-managed resources

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps01'

CRS-2676: Start of 'ora.mdnsd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.evmd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps01'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps01'

CRS-2676: Start of 'ora.gipcd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps01'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps01'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps01'

CRS-2676: Start of 'ora.diskmon' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'cbdps01'

CRS-2672: Attempting to start 'ora.ctssd' on 'cbdps01'

CRS-2676: Start of 'ora.ctssd' on 'cbdps01' succeeded

CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps01'

CRS-2676: Start of 'ora.asm' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.storage' on 'cbdps01'

CRS-2676: Start of 'ora.storage' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crf' on 'cbdps01'

CRS-2676: Start of 'ora.crf' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps01'

CRS-2676: Start of 'ora.crsd' on 'cbdps01' succeeded

CRS-6023: Starting Oracle Cluster Ready Services-managed resources

CRS-6017: Processing resource auto-start for servers: cbdps01

CRS-6016: Resource auto-start has completed for server cbdps01

CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources

CRS-4123: Oracle High Availability Services has been started.

2025/03/09 21:56:57 CLSRSC-343: Successfully started Oracle Clusterware stack

2025/03/09 21:56:57 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps01'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps01'

CRS-2676: Start of 'ora.asm' on 'cbdps01' succeeded

CRS-2672: Attempting to start 'ora.OCR.dg' on 'cbdps01'

CRS-2676: Start of 'ora.OCR.dg' on 'cbdps01' succeeded

2025/03/09 21:58:39 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.

 

2025/03/09 22:09:00 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

 

Special notes:

1) Running root.sh may fail with "CLSRSC-614: failed to get the list of configured diskgroups", as shown below:

 

2025/03/07 22:45:29 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cjtpssb01'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cjtpssb01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cjtpssb01'

CRS-2676: Start of 'ora.asm' on 'cjtpssb01' succeeded

CRS-2672: Attempting to start 'ora.OCR.dg' on 'cjtpssb01'

CRS-2676: Start of 'ora.OCR.dg' on 'cjtpssb01' succeeded

2025/03/07 22:46:47 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.

2025/03/07 22:51:52 CLSRSC-614: failed to get the list of configured diskgroups

Died at /u01/app/12.2.0.1/grid/crs/install/oraasm.pm line 2069.

The command '/u01/app/12.2.0.1/grid/perl/bin/perl -I/u01/app/12.2.0.1/grid/perl/lib -I/u01/app/12.2.0.1/grid/crs/install /u01/app/12.2.0.1/grid/crs/install/rootcrs.pl ' execution failed

 

Workaround:

[root@cbdps01 ~]# export ORACLE_HOME=/u01/app/12.2.0.1/grid

[root@cbdps01 ~]# cd /u01/app/12.2.0.1/grid/rdbms/lib

[root@cbdps01 lib]# /usr/bin/make -f ins_rdbms.mk client_sharedlib libasmclntsh12.ohso libasmperl12.ohso ORACLE_HOME=$ORACLE_HOME

 

Afterwards, rerun the root.sh script; see the following link for reference.

https://blog.csdn.net/m0_37625564/article/details/124465373

 

2) Running root.sh may fail with "Failed to start resource ora.ons", as shown below:

2025/03/08 13:45:18 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cjtpssb01'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cjtpssb01' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cjtpssb01'

CRS-2676: Start of 'ora.asm' on 'cjtpssb01' succeeded

CRS-2672: Attempting to start 'ora.OCR.dg' on 'cjtpssb01'

CRS-2676: Start of 'ora.OCR.dg' on 'cjtpssb01' succeeded

PRCR-1013 : Failed to start resource ora.ons

PRCR-1064 : Failed to start resource ora.ons on node cjtpssb01

CRS-5016: Process "/u01/app/12.2.0.1/grid/opmn/bin/onsctli" spawned by agent "ORAAGENT" for action "start" failed: details at "(:CLSN00010:)" in "/u01/app/grid/diag/crs/cjtpssb01/crs/trace/crsd_oraagent_grid.trc"

CRS-2674: Start of 'ora.ons' on 'cjtpssb01' failed

 

Workaround:

Rerunning the root.sh script avoids the problem.

 

3) Running root.sh may raise the CRS-2883 error, as shown below:

CRS-2883: Resource 'ora.cssdmonitor' failed during Clusterware stack start.

CRS-4406: Oracle High Availability Services synchronous start failed.

CRS-4000: Command Start failed, or completed with errors.

CRS-4535: Cannot communicate with Cluster Ready Services

CRS-4000: Command Status failed, or completed with errors.

2025/03/07 23:12:51 CLSRSC-117: Failed to start Oracle Clusterware stack

Died at /u01/app/12.2.0.1/grid/crs/install/crsinstall.pm line 1516.

The command '/u01/app/12.2.0.1/grid/perl/bin/perl -I/u01/app/12.2.0.1/grid/perl/lib -I/u01/app/12.2.0.1/grid/crs/install /u01/app/12.2.0.1/grid/crs/install/rootcrs.pl ' execution failed

 

Workaround:

This was judged to be caused by a dirty environment; rebuilding the OS and setting up RAC from scratch resolved it.
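
A less drastic alternative worth trying first in similar cases (an assumption based on the standard GI deconfiguration procedure; it was not attempted in this build) is to deconfigure Clusterware on the failed node and rerun root.sh:

# /u01/app/12.2.0.1/grid/crs/install/rootcrs.sh -deconfig -force

# /u01/app/12.2.0.1/grid/root.sh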

 

Node 2

[root@cbdps02 ~]# /u01/app/oraInventory/orainstRoot.sh

Changing permissions of /u01/app/oraInventory.

Adding read,write permissions for group.

Removing read,write,execute permissions for world.

 

Changing groupname of /u01/app/oraInventory to oinstall.

The execution of the script is complete.

[root@cbdps02 ~]# /u01/app/12.2.0.1/grid/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= grid

    ORACLE_HOME=  /u01/app/12.2.0.1/grid

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

   Copying dbhome to /usr/local/bin ...

   Copying oraenv to /usr/local/bin ...

   Copying coraenv to /usr/local/bin ...

 

 

Creating /etc/oratab file...

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

Relinking oracle with rac_on option

Using configuration parameter file: /u01/app/12.2.0.1/grid/crs/install/crsconfig_params

The log of current session can be found at:

  /u01/app/grid/crsdata/cbdps02/crsconfig/rootcrs_cbdps02_2025-03-09_10-09-31PM.log

2025/03/09 22:09:34 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.

2025/03/09 22:09:34 CLSRSC-4001: Installing Oracle Trace File Analyzer (TFA) Collector.

2025/03/09 22:10:34 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.

2025/03/09 22:10:34 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.

2025/03/09 22:10:35 CLSRSC-363: User ignored prerequisites during installation

2025/03/09 22:10:35 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.

2025/03/09 22:10:36 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.

2025/03/09 22:10:36 CLSRSC-594: Executing installation step 5 of 19: 'SaveParamFile'.

2025/03/09 22:10:39 CLSRSC-594: Executing installation step 6 of 19: 'SetupOSD'.

2025/03/09 22:10:58 CLSRSC-594: Executing installation step 7 of 19: 'CheckCRSConfig'.

2025/03/09 22:10:59 CLSRSC-594: Executing installation step 8 of 19: 'SetupLocalGPNP'.

2025/03/09 22:11:01 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.

2025/03/09 22:11:02 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.

2025/03/09 22:11:02 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.

2025/03/09 22:11:04 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.

2025/03/09 22:11:19 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'

2025/03/09 22:11:51 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.

2025/03/09 22:11:53 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

2025/03/09 22:12:36 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.

2025/03/09 22:12:38 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

CRS-4123: Oracle High Availability Services has been started.

CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'cbdps02'

CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'cbdps02'

CRS-2677: Stop of 'ora.drivers.acfs' on 'cbdps02' succeeded

CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'cbdps02' has completed

CRS-4133: Oracle High Availability Services has been stopped.

2025/03/09 22:12:46 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.

CRS-4123: Starting Oracle High Availability Services-managed resources

CRS-2672: Attempting to start 'ora.mdnsd' on 'cbdps02'

CRS-2672: Attempting to start 'ora.evmd' on 'cbdps02'

CRS-2676: Start of 'ora.evmd' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.mdnsd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.gpnpd' on 'cbdps02'

CRS-2676: Start of 'ora.gpnpd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.gipcd' on 'cbdps02'

CRS-2676: Start of 'ora.gipcd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cssdmonitor' on 'cbdps02'

CRS-2676: Start of 'ora.cssdmonitor' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cssd' on 'cbdps02'

CRS-2672: Attempting to start 'ora.diskmon' on 'cbdps02'

CRS-2676: Start of 'ora.diskmon' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.cssd' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'cbdps02'

CRS-2672: Attempting to start 'ora.ctssd' on 'cbdps02'

CRS-2676: Start of 'ora.ctssd' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps02'

CRS-2676: Start of 'ora.asm' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.storage' on 'cbdps02'

CRS-2676: Start of 'ora.storage' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.crf' on 'cbdps02'

CRS-2676: Start of 'ora.crf' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.crsd' on 'cbdps02'

CRS-2676: Start of 'ora.crsd' on 'cbdps02' succeeded

CRS-6017: Processing resource auto-start for servers: cbdps02

CRS-2672: Attempting to start 'ora.net1.network' on 'cbdps02'

CRS-2672: Attempting to start 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps02'

CRS-2676: Start of 'ora.net1.network' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.ons' on 'cbdps02'

CRS-2676: Start of 'ora.ASMNET1LSNR_ASM.lsnr' on 'cbdps02' succeeded

CRS-2672: Attempting to start 'ora.asm' on 'cbdps02'

CRS-2676: Start of 'ora.ons' on 'cbdps02' succeeded

CRS-2676: Start of 'ora.asm' on 'cbdps02' succeeded

CRS-6016: Resource auto-start has completed for server cbdps02

CRS-6024: Completed start of Oracle Cluster Ready Services-managed resources

CRS-4123: Oracle High Availability Services has been started.

2025/03/09 22:14:13 CLSRSC-343: Successfully started Oracle Clusterware stack

2025/03/09 22:14:13 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.

2025/03/09 22:14:25 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.

2025/03/09 22:14:34 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

[root@cbdps02 ~]#

Note: the INS-20802 error did not appear in this run, presumably because DNS was configured.

[grid@cbdps01 grid]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.233.175 192.

                                                             168.78.245,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

 

[grid@cbdps01 grid]$ crsctl stat res -t -init

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.asm

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cluster_interconnect.haip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.crf

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.crsd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cssd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cssdmonitor

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.ctssd

      1        ONLINE  ONLINE       cbdps01                  ACTIVE:0,STABLE

ora.diskmon

      1        OFFLINE OFFLINE                               STABLE

ora.drivers.acfs

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.evmd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.gipcd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.gpnpd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mdnsd

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.storage

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

 

9、Testing cluster failover

9.1、Testing node 2

Reboot node 2, then check the cluster status from node 1

[grid@cbdps01 grid]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.233.175 192.

                                                             168.78.245,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  OFFLINE                               STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  INTERMEDIATE cbdps01                  FAILED OVER,STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

 

9.2、Testing node 1

Reboot node 1, then check the cluster status from node 2

[grid@cbdps02 ~]$ crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  OFFLINE      cbdps01                  STABLE

               ONLINE  OFFLINE      cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  OFFLINE                               STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps02                  169.254.132.4 192.16

                                                             8.78.175,STABLE

ora.asm

      1        ONLINE  OFFLINE                               STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps01.vip

      1        ONLINE  OFFLINE                               STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  OFFLINE                               STABLE

ora.mgmtdb

      1        ONLINE  OFFLINE      cbdps02                  Instance Shutdown,ST

                                                             ARTING

ora.qosmserver

      1        ONLINE  OFFLINE                               STABLE

ora.scan1.vip

      1        ONLINE  OFFLINE                               STABLE

--------------------------------------------------------------------------------

 

10、Installing the DB software

[oracle@cbdps01 ~]$ cd /u01/setup/db/database/

[oracle@cbdps01 database]$ export DISPLAY=192.168.133.1:0.0

[oracle@cbdps01 database]$ ./runInstaller

Starting Oracle Universal Installer...

 

Checking Temp space: must be greater than 500 MB.   Actual 54773 MB    Passed

Checking swap space: must be greater than 150 MB.   Actual 8191 MB    Passed

Checking monitor: must be configured to display at least 256 colors.    Actual 16777216    Passed

Preparing to launch Oracle Universal Installer from /tmp/OraInstall2025-03-09_10-58-32PM. Please wait ...[oracle@cbdps01 database]$

Click "SSH connectivity", enter the oracle password, then click "Setup" and "Test"; the following dialogs appear in turn.

Note: because DNS was configured, the following warnings did not appear (other failed checks, if any, must not be ignored).

resolv.conf Integrity

Single Client Access Name(SCAN)

[root@cbdps01 ~]# /u01/app/oracle/product/12.2.0.1/db_1/root.sh

Performing root user operation.

 

The following environment variables are set as:

    ORACLE_OWNER= oracle

    ORACLE_HOME=  /u01/app/oracle/product/12.2.0.1/db_1

 

Enter the full pathname of the local bin directory: [/usr/local/bin]:

The contents of "dbhome" have not changed. No need to overwrite.

The contents of "oraenv" have not changed. No need to overwrite.

The contents of "coraenv" have not changed. No need to overwrite.

 

Entries will be added to the /etc/oratab file as needed by

Database Configuration Assistant when a database is created

Finished running generic part of root script.

Now product-specific root actions will be performed.

 

Note: run the root.sh script as root on both nodes.

Note: the DB software installation is now complete.

 

11、Configuring the ASM disks

Note: run on node 1. Create the DATA disk group with external redundancy (if external redundancy is used in production, the underlying storage must provide RAID protection); more disks can be added later as needed.

[grid@cbdps01 ~]$ export DISPLAY=192.168.133.1:0.0

[grid@cbdps01 ~]$ asmca

[grid@cbdps01 ~]$ asmcmd

ASMCMD> lsdg

State    Type    Rebal  Sector  Logical_Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name

MOUNTED  EXTERN  N         512             512   4096  4194304     37248    37080                0           37080              0             N  DATA/

MOUNTED  EXTERN  N         512             512   4096  4194304     18624    18480                0           18480              0             N  FRA/

MOUNTED  EXTERN  N         512             512   4096  4194304     40960     6504                0            6504              0             Y  OCR/

ASMCMD>

 

Note: the newly created DATA and FRA disk groups are now created and successfully mounted; a sketch for growing a disk group later follows below.
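
If a group needs to grow later, a disk can be added online from a SYSASM session; a minimal sketch (the device /dev/asm-data05 is hypothetical and would first need the same multipath and udev binding as the existing disks):

SQL> ALTER DISKGROUP DATA ADD DISK '/dev/asm-data05' REBALANCE POWER 4;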

 

12、Patching

12.1、Patch inventory before the upgrade

[oracle@cbdps01 ~]$ opatch lspatches

There are no Interim patches installed in this Oracle Home "/u01/app/oracle/product/12.2.0.1/db_1".

 

OPatch succeeded.

 

[grid@cbdps01 ~]$ opatch lspatches

33610989;TOMCAT RELEASE UPDATE 12.2.0.1.0(ID:RELEASE) (33610989)

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

26839277;DBWLM RELEASE UPDATE 12.2.0.1.0(ID:170913) (26839277)

33116894;ACFS JUL 2021 RELEASE UPDATE 12.2.0.1.210720 (33116894)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

12.2、Applying the patches

12.2.1、Upgrading OPatch

Note: OPatch must be upgraded on both nodes to at least version 12.2.0.1.28; 12.2.0.1.41 is installed here.

 

Unzip p6880880_122010_Linux-x86-64.zip and replace ORACLE_HOME/OPatch and GRID_HOME/OPatch on both nodes.

Check the OPatch versions

[grid@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

[oracle@cbdps01 ~]$ opatch version

OPatch Version: 12.2.0.1.6

 

OPatch succeeded.

 

Upgrade OPatch in the oracle home

[root@cbdps01 ~]# chown -R oracle:oinstall /u01/setup/OPatch

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/OPatch/

[oracle@cbdps01 OPatch]$ mv $ORACLE_HOME/OPatch $ORACLE_HOME/OPatch.bak

[oracle@cbdps01 OPatch]$ unzip -q -o p6880880_122010_Linux-x86-64.zip -d $ORACLE_HOME

[oracle@cbdps01 OPatch]$ $ORACLE_HOME/OPatch/opatch version

OPatch Version: 12.2.0.1.41

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

12.2.2、Applying the DB RU

Apply RU 33583921 to the DB home; run as the root user.

Notes:

1) If instances are already running from this home, shut them down first;

2) The GI RU was already applied during the GI installation.

 

Apply the DB patch as the root user (the cluster does not need to be stopped, but any oracle instances running from this home must be shut down first)

[root@cbdps01 ~]# chown -R oracle:oinstall /u01/setup/RU

12.2.2.1、OPatch conflict check

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /u01/setup/RU/33583921/33587128

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir /u01/setup/RU/33583921/33678030

 

12.2.2.2、OPatch system space check

[oracle@cbdps01 ~]$ vi /tmp/patch_list_dbhome.txt

/u01/setup/RU/33583921/33587128

/u01/setup/RU/33583921/33678030

 

[oracle@cbdps01 ~]$ $ORACLE_HOME/OPatch/opatch prereq CheckSystemSpace -phBaseFile /tmp/patch_list_dbhome.txt

 

12.2.2.3、Applying the RU to the DB home

[root@cbdps01 ~]# cd /u01/setup/RU

[root@cbdps01 RU]# /u01/app/oracle/product/12.2.0.1/db_1/OPatch/opatchauto apply /u01/setup/RU/33583921/33587128 -oh /u01/app/oracle/product/12.2.0.1/db_1/

[root@cbdps01 RU]# /u01/app/oracle/product/12.2.0.1/db_1/OPatch/opatchauto apply /u01/setup/RU/33583921/33678030 -oh /u01/app/oracle/product/12.2.0.1/db_1/

 

Note: the two applies took about 6 minutes and 1 minute respectively (the 6-minute run finished quickly because the system has plenty of memory); actual times depend on the system configuration.

 

[root@cbdps01 ~]# free -m

              total        used        free      shared  buff/cache   available

Mem:          11550        8208         733         642        2608        1035

Swap:          8191          11        8180

 

[oracle@cbdps01 ~]$ opatch lspatches

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

Unpacking the RU on node 2

[root@cbdps02 setup]# chown -R oracle:oinstall /u01/setup/RU

[root@cbdps02 setup]# su - oracle -c "unzip -q /u01/setup/RU/p33583921_122010_Linux-x86-64.zip -d /u01/setup/RU"

 

12.3、Applying the OJVM RU

[root@cbdps01 ~]# chown -R oracle:oinstall /u01/setup/OJVM

[root@cbdps01 ~]# su - oracle

[oracle@cbdps01 ~]$ opatch lsinventory

[oracle@cbdps01 ~]$ cd /u01/setup/OJVM

[oracle@cbdps01 OJVM]$ unzip -q p33561275_122010_Linux-x86-64.zip

[oracle@cbdps01 OJVM]$ cd 33561275

[oracle@cbdps01 33561275]$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

[oracle@cbdps01 33561275]$ $ORACLE_HOME/OPatch/opatch apply -silent

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-03-08_20-33-27PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

Prereq "checkConflictAgainstOHWithDetail" passed.

 

OPatch succeeded.

[oracle@cbdps01 33561275]$ $ORACLE_HOME/OPatch/opatch apply -silent

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-03-08_20-33-40PM_1.log

 

Verifying environment and performing prerequisite checks...

OPatch continues with these patches:   33561275 

 

Do you want to proceed? [y|n]

Y (auto-answered by -silent)

User Responded with: Y

All checks passed.

 

Please shutdown Oracle instances running out of this ORACLE_HOME on the local system.

(Oracle Home = '/u01/app/oracle/product/12.2.0.1/db_1')

 

 

Is the local system ready for patching? [y|n]

Y (auto-answered by -silent)

User Responded with: Y

Backing up files...

Applying interim patch '33561275' to OH '/u01/app/oracle/product/12.2.0.1/db_1'

 

Patching component oracle.javavm.server, 12.2.0.1.0...

 

Patching component oracle.javavm.server.core, 12.2.0.1.0...

 

Patching component oracle.rdbms.dbscripts, 12.2.0.1.0...

 

Patching component oracle.javavm.client, 12.2.0.1.0...

 

Patching component oracle.rdbms, 12.2.0.1.0...

 

Patching component oracle.dbjava.jdbc, 12.2.0.1.0...

 

Patching component oracle.dbjava.ic, 12.2.0.1.0...

Patch 33561275 successfully applied.

Log file location: /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-03-08_20-33-40PM_1.log

 

OPatch succeeded.

 

[oracle@cbdps01 33561275]$ opatch lsinventory

[oracle@cbdps01 33561275]$ opatch lspatches

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

12.4、Applying the DB one-off patches

12.4.1、Patch 24921392

Note: For a RAC environment, shut down all the services (database, ASM, listeners, nodeapps, and CRS daemons) running from the Oracle home of the node you want to patch. After you patch this node, start the services on this node. Repeat this process for each of the other nodes of the Oracle RAC system. OPatch is used on only one node at a time.

[root@cbdps01 setup]# crsctl stop crs

[root@cbdps01 setup]# chown -R oracle:oinstall /u01/setup/oneoff_patch/

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip -q p24921392_12201230117DBJAN2023RU_Linux-x86-64.zip

$ cd 24921392

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_17-50-57PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

The details are:

Interim patch 24921392  requires prerequisite patch(es) [34850184] which are not present in the Oracle Home.

Apply prerequisite patch(es) [34850184] before applying interim patch 24921392.

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Conflicts/Supersets for each patch are:

 

Patch : 24921392

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libgeneric12.a:sskgm.o

Note: the pre-check for patch 24921392 found conflicts, so its installation was put on hold.

 

12.4.2、Patch 26878028

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p26878028_12201240416DBAPR2024RU_Linux-x86-64.zip

$ cd 26878028

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_17-59-23PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

The details are:

Interim patch 26878028  requires prerequisite patch(es) [36325581] which are not present in the Oracle Home.

Apply prerequisite patch(es) [36325581] before applying interim patch 26878028.

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Conflicts/Supersets for each patch are:

 

Patch : 26878028

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libgeneric12.a:kgl.o

 

OPatch succeeded.

Note: the pre-check for patch 26878028 found conflicts, so its installation was put on hold.

 

12.4.3、Patch 27873364

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p27873364_121020_Linux-x86-64.zip

$ cd 27873364

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_18-09-28PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

ZOP-40: The patch(es) has conflicts with other patches installed in the Oracle Home (or) among themselves.

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Following patches have conflicts. Please contact Oracle Support and get the merged patch of the patches :

27873364, 33587128

 

Conflicts/Supersets for each patch are:

 

Patch : 27873364

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libserver12.a:ktli.o

 

OPatch succeeded.

Note: the pre-check for patch 27873364 found conflicts, so its installation was put on hold.

 

12.4.4、Patch 27882764

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip p27882764_122010_Linux-x86-64.zip

$ cd 27882764

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 12.2.0.1.41

Copyright (c) 2025, Oracle Corporation.  All rights reserved.

 

PREREQ session

 

Oracle Home       : /u01/app/oracle/product/12.2.0.1/db_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/12.2.0.1/db_1/oraInst.loc

OPatch version    : 12.2.0.1.41

OUI version       : 12.2.0.1.4

Log file location : /u01/app/oracle/product/12.2.0.1/db_1/cfgtoollogs/opatch/opatch2025-02-07_18-16-27PM_1.log

 

Invoking prereq "checkconflictagainstohwithdetail"

 

ZOP-40: The patch(es) has conflicts with other patches installed in the Oracle Home (or) among themselves.

 

Prereq "checkConflictAgainstOHWithDetail" failed.

 

Summary of Conflict Analysis:

 

There are no patches that can be applied now.

 

Following patches have conflicts. Please contact Oracle Support and get the merged patch of the patches :

27882764, 33587128

 

Conflicts/Supersets for each patch are:

 

Patch : 27882764

 

        Conflict with 33587128

        Conflict details:

        /u01/app/oracle/product/12.2.0.1/db_1/lib/libserver12.a:ksu.o

 

OPatch succeeded.

Note: the pre-check for patch 27882764 found conflicts, so its installation was put on hold.

 

12.4.5、Patch 30666479

[root@cbdps01 setup]# su - oracle

[oracle@cbdps01 ~]$ cd /u01/setup/oneoff_patch/

$ unzip -q p30666479_12201220118DBJAN2022RU_Linux-x86-64.zip

$ cd 30666479

$ opatch prereq CheckConflictAgainstOHWithDetail -ph ./

$ opatch apply

$ opatch lsinventory

[oracle@cbdps01 30666479]$ opatch lspatches

30666479;

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

33678030;OCW JAN 2022 RELEASE UPDATE 12.2.0.1.220118 (33678030)

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

 

OPatch succeeded.

 

Notes:

1) Only patch 30666479 is free of conflicts;

2) A core production database carries the oracle patches listed below; patches 27882764, 27873364, 26878028 and 24921392 were presumably applied there before 33587128, which is why no conflicts were detected.

[oracle@hisdb01 ~]$ opatch lspatches

30666479;

27882764;

27873364;

26878028;

24921392;

33561275;OJVM RELEASE UPDATE 12.2.0.1.220118 (33561275)

30118419;OCW Interim patch for 30118419

33587128;Database Jan 2022 Release Update : 12.2.0.1.220118 (33587128)

12.4.6、Starting the cluster on node 1

[root@cbdps01 ~]# crsctl start crs

 

Note: repeat the same steps on node 2, then continue with the steps below.

 

13、Creating the database with DBCA

Note: run on node 1 only.

Open Xmanager and create the database via the dbca GUI, selecting ZHS16GBK as the database character set.

[oracle@cbdps01 ~]$ export DISPLAY=192.168.133.1:0.0

[oracle@cbdps01 ~]$ dbca

Note: the Oracle 12.2.0.1 RAC database has now been created successfully.
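
For scripted, repeatable builds dbca also has a silent mode; a rough sketch of an equivalent command (options per the 12.2 dbca syntax; the passwords are placeholders -- check dbca -help before relying on it):

[oracle@cbdps01 ~]$ dbca -silent -createDatabase -templateName General_Purpose.dbc -gdbname cbdps -sid cbdps -databaseConfigType RAC -nodelist cbdps01,cbdps02 -storageType ASM -diskGroupName DATA -recoveryGroupName FRA -characterSet ZHS16GBK -sysPassword <sys_pwd> -systemPassword <system_pwd>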

14、Verification queries

[oracle@cbdps01 ~]$ sqlplus / as sysdba

 

SQL*Plus: Release 12.2.0.1.0 Production on Mon Mar 10 07:00:48 2025

 

Copyright (c) 1982, 2016, Oracle.  All rights reserved.

 

 

Connected to:

Oracle Database 12c Enterprise Edition Release 12.2.0.1.0 - 64bit Production

 

[oracle@cbdps01 ~]$ sqlplus sys/oracle4U@cbdps01.cqupt.com:1521/cbdps as sysdba

 

SQL*Plus: Release 12.2.0.1.0 Production on Mon Mar 10 07:02:16 2025

 

Copyright (c) 1982, 2016, Oracle.  All rights reserved.

 

 

Connected to:

Oracle Database 12c Enterprise Edition Release 12.2.0.1.0 - 64bit Production

 

SQL>  

[oracle@cbdps01 ~]$ sqlplus sys/oracleU@cbdps01.cqupt.com:1521/cbdps as sysdba

 

SQL*Plus: Release 12.2.0.1.0 Production on Mon Mar 10 07:02:41 2025

 

Copyright (c) 1982, 2016, Oracle.  All rights reserved.

 

ERROR:

ORA-01017: invalid username/password; logon denied

 

 

Enter user-name:

 

SQL> select name,open_mode from v$database;

 

NAME      OPEN_MODE

--------- --------------------

CBDPS     READ WRITE
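
To confirm that both RAC instances are open, one more check can be run from any node (the gv$ views aggregate across all instances); both instances should report OPEN:

SQL> select inst_id, instance_name, status from gv$instance;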

 

[root@cbdps01 ~]# crsctl stat res -t

--------------------------------------------------------------------------------

Name           Target  State        Server                   State details      

--------------------------------------------------------------------------------

Local Resources

--------------------------------------------------------------------------------

ora.ASMNET1LSNR_ASM.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.DATA.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.FRA.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.LISTENER.lsnr

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.OCR.dg

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.chad

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.net1.network

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.ons

               ONLINE  ONLINE       cbdps01                  STABLE

               ONLINE  ONLINE       cbdps02                  STABLE

ora.proxy_advm

               OFFLINE OFFLINE      cbdps01                  STABLE

               OFFLINE OFFLINE      cbdps02                  STABLE

--------------------------------------------------------------------------------

Cluster Resources

--------------------------------------------------------------------------------

ora.LISTENER_SCAN1.lsnr

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.MGMTLSNR

      1        ONLINE  ONLINE       cbdps01                  169.254.233.175 192.

                                                             168.78.245,STABLE

ora.asm

      1        ONLINE  ONLINE       cbdps01                  Started,STABLE

      2        ONLINE  ONLINE       cbdps02                  Started,STABLE

      3        OFFLINE OFFLINE                               STABLE

ora.cbdps.db

      1        ONLINE  ONLINE       cbdps01                  Open,HOME=/u01/app/o

                                                             racle/product/12.2.0

                                                             .1/db_1,STABLE

      2        ONLINE  ONLINE       cbdps02                  Open,HOME=/u01/app/o

                                                             racle/product/12.2.0

                                                             .1/db_1,STABLE

ora.cbdps01.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.cbdps02.vip

      1        ONLINE  ONLINE       cbdps02                  STABLE

ora.cvu

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.mgmtdb

      1        ONLINE  ONLINE       cbdps01                  Open,STABLE

ora.qosmserver

      1        ONLINE  ONLINE       cbdps01                  STABLE

ora.scan1.vip

      1        ONLINE  ONLINE       cbdps01                  STABLE

--------------------------------------------------------------------------------

 

[grid@cbdps01 ~]$ lsnrctl status

 

LSNRCTL for Linux: Version 12.2.0.1.0 - Production on 10-MAR-2025 07:03:40

 

Copyright (c) 1991, 2016, Oracle.  All rights reserved.

 

Connecting to (DESCRIPTION=(ADDRESS=(PROTOCOL=IPC)(KEY=LISTENER)))

STATUS of the LISTENER

------------------------

Alias                     LISTENER

Version                   TNSLSNR for Linux: Version 12.2.0.1.0 - Production

Start Date                09-MAR-2025 23:45:09

Uptime                    0 days 7 hr. 18 min. 31 sec

Trace Level               off

Security                  ON: Local OS Authentication

SNMP                      OFF

Listener Parameter File   /u01/app/12.2.0.1/grid/network/admin/listener.ora

Listener Log File         /u01/app/grid/diag/tnslsnr/cbdps01/listener/alert/log.xml

Listening Endpoints Summary...

  (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=LISTENER)))

  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.245)(PORT=1521)))

  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.246)(PORT=1521)))

Services Summary...

Service "+ASM" has 1 instance(s).

  Instance "+ASM1", status READY, has 1 handler(s) for this service...

Service "+ASM_DATA" has 1 instance(s).

  Instance "+ASM1", status READY, has 1 handler(s) for this service...

Service "+ASM_FRA" has 1 instance(s).

  Instance "+ASM1", status READY, has 1 handler(s) for this service...

Service "+ASM_OCR" has 1 instance(s).

  Instance "+ASM1", status READY, has 1 handler(s) for this service...

Service "cbdps" has 1 instance(s).

  Instance "cbdps1", status READY, has 1 handler(s) for this service...

Service "cbdpsXDB" has 1 instance(s).

  Instance "cbdps1", status READY, has 1 handler(s) for this service...

The command completed successfully

 

Note: all resources are healthy. This completes the installation of Oracle 12.2.0.1 GI & RAC on RHEL 7.9.
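
Note: as an additional cross-check, srvctl reports the same state from the clusterware side, for example (run as oracle and grid respectively):

[oracle@cbdps01 ~]$ srvctl status database -d cbdps

[grid@cbdps01 ~]$ srvctl config scan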

 

Special note: the core production database mentioned above was built without mgmtdb (the Grid Infrastructure Management Repository); based on further research, installing this component is recommended.
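
Note: whether the GIMR (mgmtdb) is present and running can be checked with srvctl as the grid user:

[grid@cbdps01 ~]$ srvctl status mgmtdb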

References:

https://www.cnblogs.com/aegis1019/p/8866756.html

https://cloud.tencent.com/developer/article/1431555

https://cloud.tencent.com/developer/article/1431536

https://cloud.tencent.com/developer/article/1431538

https://cloud.tencent.com/developer/user/1955618/search/article-Linux%E5%B9%B3%E5%8F%B0%20Oracle%2012cR2%20RAC

https://www.modb.pro/db/238492

https://www.modb.pro/db/193241#_2_rootsh__27

https://www.cnblogs.com/polestar/p/3827268.html


