创建的oracle用户和grid用户的UID要求与AIX数据库服务器上对应用户的UID一致
dba用户组的GID要求与AIX数据库服务器上dba用户组的GID一致
nfs服务器配置
groupadd -g 300 dba
useradd -m -u 311 -g dba -d /home/grid -s /bin/bash -c "Grid Infrastructure Owner" grid
useradd -m -u 301 -g dba -d /home/oracle -s /bin/bash -c "Oracle Software Owner" oracle
aix服务器节点
创建dba组
mkgroup -a id='300' admin=false projects='System' dba
grid 用户及属性
mkuser id='311' admin=true pgrp='dba' groups='dba' admgroups='dba' home='/home/grid' grid
chuser capabilities=CAP_NUMA_ATTACH,CAP_BYPASS_RAC_VMM,CAP_PROPAGATE grid
chown -R grid:dba /home/grid
oracle 用户及属性
mkdir /home/oracle
mkuser id=301 admin=true pgrp=dba groups=dba admgroups=dba home=/home/oracle shell=/usr/bin/ksh oracle
chuser capabilities=CAP_NUMA_ATTACH,CAP_BYPASS_RAC_VMM,CAP_PROPAGATE oracle
chown -R oracle:dba /home/oracle
添加磁盘
添加第二块磁盘
parted /dev/sdb
查看:(parted)p
将磁盘分区表变成gpt格式(大于2TB的磁盘必须使用gpt分区表;parted对gpt和msdos分区表均可操作,此处统一采用gpt)
mklabel gpt
mkpart primary 0 nG
查看:(parted) p
退出:(parted)quit ( parted分区自动保存,不用手动保存 )
lvm管理
linux上创建PV/VG/LV
LVM的整体思路是:
首先创建PV–>然后创建VG并将多个PV加到VG里–>然后创建LV–>格式化分区–>mount分区
创建物理卷PV
–对裸盘创建pv
pvcreate /dev/sdb
–对分区创建pv
pvcreate /dev/sdb1
创建完成后可以查看一下
pvs
pvdisplay /dev/sdb1
创建卷组VG
使用vgcreate创建卷组VG,并且此处可以-s选项指定PE(LE)的大小,(默认PE大小4M)
vgcreate vg1 /dev/sdb1
vgcreate -s 16M vg2 /dev/sdb2
–创建VG并将多个PV加到VG
vgcreate vg_test /dev/sdb /dev/sdc
创建完成后查看一下
vgs
vgdisplay vg1
注意:PE大,读取速度快,但浪费空间。反之,读取速度慢,但节省空间。类似于文件系统的簇(cluster)大小
创建逻辑卷LV
使用lvcreate创建LV。lvcreate -n lvname -L lvsize(M,G)|-l LEnumber vgname
lvcreate -n lv1 -L 64M vg1
lvcreate -n lv2 -L 10G vg1
lvs
格式化与挂载
mkfs.ext4 /dev/vg1/lv1
mkfs.ext4 /dev/vg1/lv2
mount分区
mkdir /data
echo "/dev/vg_test/lv_test /data ext4 defaults 0 0" >> /etc/fstab
整体处理开机自动挂载
mkdir /ocr1
mkdir /ocr2
mkdir /ocr3
mkdir /vot1
mkdir /vot2
mkdir /vot3
mkdir /data1
mkdir /data2
mkdir /data3
mkdir /data4
mkdir /data5
修改目录用户属组和权限
chown -R grid:dba /ocr1
chown -R grid:dba /ocr2
chown -R grid:dba /ocr3
chown -R grid:dba /vot1
chown -R grid:dba /vot2
chown -R grid:dba /vot3
chown -R oracle:dba /data1
chown -R oracle:dba /data2
chown -R oracle:dba /data3
chown -R oracle:dba /data4
chown -R oracle:dba /data5
chmod -R 775 /ocr*
chmod -R 775 /vot*
chmod -R 775 /data*
添加到/etc/fstab文件
vi /etc/fstab
/dev/vg1/ocr1 /ocr1 ext4 defaults 0 0
/dev/vg1/ocr2 /ocr2 ext4 defaults 0 0
/dev/vg1/ocr3 /ocr3 ext4 defaults 0 0
/dev/vg1/vot1 /vot1 ext4 defaults 0 0
/dev/vg1/vot2 /vot2 ext4 defaults 0 0
/dev/vg1/vot3 /vot3 ext4 defaults 0 0
/dev/vg1/data1 /data1 ext4 defaults 0 0
/dev/vg1/data2 /data2 ext4 defaults 0 0
/dev/vg1/data3 /data3 ext4 defaults 0 0
/dev/vg1/data4 /data4 ext4 defaults 0 0
/dev/vg1/data5 /data5 ext4 defaults 0 0
部署nfs
NFS服务端所需的软件列表
nfs-utils: 这个是NFS服务主程序(包含rpc.nfsd、rpc.mountd、daemons)
rpcbind: 这个是CentOS6.X的RPC主程序(CentOS5.X的为portmap)
检查软件是否安装
客户端和服务端都要检查
rpm -qa nfs-utils rpcbind
如果没有安装在系统中通过yum 命令进行安装以上两个包
#yum install -y nfs-utils rpcbind
[root@shareddisk19 ~]# rpm -qa | grep nfs
nfs-utils-1.2.3-54.el6.x86_64
nfs4-acl-tools-0.3.3-6.el6.x86_64
nfs-utils-lib-1.1.5-9.el6.x86_64
启动NFS服务端相关服务
服务端操作:
#service rpcbind status 查看状态
#service rpcbind start 启动
#service rpcbind stop 停止
#service rpcbind restart 重启
启动NFS服务
#service nfs start 启动
#service nfs status 查看状态
#service nfs stop 停止
#service nfs restart 重启
设置开机启动
[root@h1 ~]# chkconfig nfs on
[root@h1 ~]# chkconfig rpcbind on
Add the following lines to the “/etc/exports” file.
vi /etc/exports
/ocr1 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/ocr2 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/ocr3 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/vot1 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/vot2 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/vot3 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/data1 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/data2 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/data3 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/data4 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/data5 *(rw,sync,no_wdelay,insecure_locks,no_root_squash)
重启nfs
chkconfig nfs on
service nfs restart
输出本地挂载点
[root@h1 ~]# showmount -e localhost
[root@h1 ~]# showmount -e 192.168.123.211
AIX7.2系统NFS目录mount方法
服务端和客户端配置/etc/hosts
服务端/etc/hosts
192.168.123.211 shareddisk19
192.168.123.202 aixnode1
192.168.123.203 aixnode2
客户端/etc/hosts
192.168.123.211 shareddisk19
192.168.123.202 aixnode1
192.168.123.203 aixnode2
AIX 开机自动挂载NFS共享
重新设置网络参数
nfso -p -o nfs_use_reserved_ports=1
提示信息显示写入了nextboot file
查看/etc/tunables/nextboot 新增加我们刚刚设置的参数
vi /etc/filesystems
/opt/app/ocr1:
        dev             = "/ocr1"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = cio,rw,bg,hard,intr,rsize=65536,wsize=65536,timeo=600,proto=tcp,noac,vers=3,sec=sys
        account         = false
/opt/app/ocr2:
        dev             = "/ocr2"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = cio,rw,bg,hard,intr,rsize=65536,wsize=65536,timeo=600,proto=tcp,noac,vers=3,sec=sys
        account         = false
/opt/app/ocr3:
        dev             = "/ocr3"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = cio,rw,bg,hard,intr,rsize=65536,wsize=65536,timeo=600,proto=tcp,noac,vers=3,sec=sys
        account         = false
/opt/app/data1:
        dev             = "/data1"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = bg,hard,nointr,noac,llock,rsize=32768,wsize=32768,sec=sys,nosuid
        account         = false
/opt/app/data2:
        dev             = "/data2"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = bg,hard,nointr,noac,llock,rsize=32768,wsize=32768,sec=sys,nosuid
        account         = false
/opt/app/data3:
        dev             = "/data3"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = bg,hard,nointr,noac,llock,rsize=32768,wsize=32768,sec=sys,nosuid
        account         = false
/opt/app/data4:
        dev             = "/data4"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = bg,hard,nointr,noac,llock,rsize=32768,wsize=32768,sec=sys,nosuid
        account         = false
/opt/app/data5:
        dev             = "/data5"
        vfs             = nfs
        nodename        = shareddisk19
        mount           = true
        options         = bg,hard,nointr,noac,llock,rsize=32768,wsize=32768,sec=sys,nosuid
        account         = false
CentOS开机启动卡在进度条 提示NFS quotas: Cannot register service: RPC
CentOS6 开机启动卡在进度条,按ESC显示启动详细信息,如下错误:
Starting NFS services: [ OK ]
Starting NFS quotas: Cannot register service: RPC: Unable to receive; errno = Connection refused
rpc.rquotad: unable to register (RQUOTAPROG, RQUOTAVERS, udp).
[FAILED]
Starting NFS mountd: [FAILED]
Starting NFS daemon: rpc.nfsd: writing fd to kernel failed: errno 111 (Connection refused)
rpc.nfsd: unable to set any sockets for nfsd
[FAILED]
RH系操作系统在6.0版本之后使用rpcbind服务(5系版本为portmap服务)控制rpc的启动,由于NFS和nfslock的启动需要向rpc进行注册,rpcbind服务不启动的话就会报错
解决办法:重启进入单用户模式,把rpcbind服务设置为开机启动:chkconfig rpcbind on 即可。
Oracle RAC安装前准备工作
2.1.Oracle RAC Demo Environment
server AIX Boot IP Public VIP Private IP SCAN IP Shared Storage
dbnode1 7200-04-01-1939 192.168.123.201 172.16.8.12 192.168.123.210 172.16.102.225 5*300GB LUNs
dbnode2 7200-04-01-1939 192.168.123.203 172.16.8.13 192.168.123.210
2.2.依赖的AIX 系统软件包
lslpp -l |egrep “(opens|bos.adt|bos.perf|rsct.basic|rsct.compat|xlC.aix)”
检查下列软件包是否存在:
bos.adt.base 7.2.4.0 COMMITTED Base Application Development
bos.adt.debug 7.2.4.0 COMMITTED Base Application Development
bos.adt.include 7.2.4.0 COMMITTED Base Application Development
bos.adt.lib 7.2.4.0 COMMITTED Base Application Development
bos.adt.libm 7.2.3.0 COMMITTED Base Application Development
bos.perf.diag_tool 7.2.2.0 COMMITTED Performance Diagnostic Tool
bos.perf.fdpr 7.2.0.0 COMMITTED Feedback Directed Program
bos.perf.libperfstat 7.2.4.0 COMMITTED Performance Statistics Library
bos.perf.perfstat 7.2.4.0 COMMITTED Performance Statistics
bos.perf.pmaix 7.2.3.15 COMMITTED Performance Management
bos.perf.proctools 7.2.4.0 COMMITTED Proc Filesystem Tools
bos.perf.tools 7.2.4.0 COMMITTED Base Performance Tools
bos.perf.tune 7.2.4.0 COMMITTED Performance Tuning Support
openssh.base.client 7.5.102.1801 COMMITTED Open Secure Shell Commands
openssh.base.server 7.5.102.1801 COMMITTED Open Secure Shell Server
openssh.man.en_US 7.5.102.1801 COMMITTED Open Secure Shell
openssh.msg.en_US 7.5.102.1801 COMMITTED Open Secure Shell Messages -
openssl.base 1.0.2.1801 COMMITTED Open Secure Socket Layer
openssl.license 1.0.2.1801 COMMITTED Open Secure Socket License
openssl.man.en_US 1.0.2.1801 COMMITTED Open Secure Socket Layer
rsct.basic.hacmp 3.2.5.0 COMMITTED RSCT Basic Function (HACMP/ES
rsct.basic.rte 3.2.5.0 COMMITTED RSCT Basic Function
rsct.basic.sp 3.2.5.0 COMMITTED RSCT Basic Function (PSSP
rsct.compat.basic.hacmp 3.2.5.0 COMMITTED RSCT Event Management Basic
rsct.compat.basic.rte 3.2.5.0 COMMITTED RSCT Event Management Basic
rsct.compat.basic.sp 3.2.5.0 COMMITTED RSCT Event Management Basic
rsct.compat.clients.hacmp 3.2.5.0 COMMITTED RSCT Event Management Client
rsct.compat.clients.rte 3.2.5.0 COMMITTED RSCT Event Management Client
rsct.compat.clients.sp 3.2.5.0 COMMITTED RSCT Event Management Client
xlC.aix61.rte 16.1.0.2 COMMITTED IBM XL C++ Runtime for AIX 6.1
bos.adt.base 7.2.4.0 COMMITTED Base Application Development
bos.adt.lib 7.2.4.0 COMMITTED Base Application Development
bos.perf.diag_tool 7.2.2.0 COMMITTED Performance Diagnostic Tool
bos.perf.libperfstat 7.2.4.0 COMMITTED Performance Statistics Library
bos.perf.perfstat 7.2.4.0 COMMITTED Performance Statistics
bos.perf.pmaix 7.2.3.15 COMMITTED Performance Management
bos.perf.tools 7.2.4.0 COMMITTED Base Performance Tools
bos.perf.tune 7.2.4.0 COMMITTED Performance Tuning Support
openssh.base.client 7.5.102.1801 COMMITTED Open Secure Shell Commands
openssh.base.server 7.5.102.1801 COMMITTED Open Secure Shell Server
openssl.base 1.0.2.1801 COMMITTED Open Secure Socket Layer
rsct.basic.rte 3.2.5.0 COMMITTED RSCT Basic Function
rsct.compat.basic.rte 3.2.5.0 COMMITTED RSCT Event Management Basic
instfix -i |grep -i 6143 #for AIX 7.2 only
All filesets for IJ16143 were found.
Oracle 18c/19c依赖xlfrte,需要安装AIX第2张盘或者从XLF 编译器安装xlfrte这个包,以AIX 7.2为例:
loopmount -i Aix_7200-02-00_2of2_102017.iso -o "-V cdrfs -o ro" -m /mnt
installp -agYXd . xlfrte
xlfrte.msg.en_US 15.1.3.0 USR APPLY SUCCESS
注:Oracle 12.2 or 18C or 19C grid依赖xlf rte包
ldd /oracle/grid/lib/libora_netlib.so
/oracle/grid/lib/libora_netlib.so needs:
/usr/lib/libc.a(shr_64.o)
/usr/lib/libxlf90.a(xlfsys_64.o)
/usr/lib/libxlf90.a(io_64.o)
/unix
/usr/lib/libcrypt.a(shr_64.o)
2.3.AIX 参数
网络参数
no -p -o rfc1323=1
no -po tcp_fastlo=1
no -p -o sb_max=10485760
no -p -o tcp_recvspace=262144
no -p -o tcp_sendspace=262144
no -p -o udp_sendspace=1048576
no -p -o udp_recvspace=10485760
用户最大进程数
chdev -l sys0 -a maxuproc=16384
以支持锁定SGA 内存(lock_sga=true)
vmo -p -o v_pinshm=1
vmo -p -o vmm_klock_mode=2
chdev -l sys0 -a ncargs='1024' -a maxuproc=16384
SSH parameter LoginGraceTime
vi /etc/ssh/sshd_config
LoginGraceTime 0 ## default #LoginGraceTime 2m
2.4.配置AIX iocp
mkdev -l iocp0
chdev -l iocp0 -P -a autoconfig='available'
或通过# smit iocp配置iocp
2.5.Setup NTP server & Client(option)
此示例中ntp server ip 为172.16.24.105, 7801lp1, 780l7为ntp client。
Ntp server 配置文件
vi /etc/ntp.conf
broadcastclient
server 127.127.1.0 prefer
driftfile /etc/ntp.drift
tracefile /etc/ntp.trace
启动ntp server
#startsrc -s xntpd -a "-x"
Ntp clinet 配置文件
vi /etc/ntp.conf
server 172.16.102.221
driftfile /etc/ntp.drift
tracefile /etc/ntp.trace
启动ntp client
#startsrc -s xntpd -a "-x"
修改vi /etc/rc.tcpip使得重起后生效
Start up Network Time Protocol (NTP) daemon
start /usr/sbin/xntpd "$src_running" "-x"
2.6.组、用户及属性
创建dba组
mkgroup -a id='300' admin=false projects='System' dba
grid 用户及属性
mkuser id='311' admin=true pgrp='dba' groups='dba' admgroups='dba' home='/home/grid' grid
chuser capabilities=CAP_NUMA_ATTACH,CAP_BYPASS_RAC_VMM,CAP_PROPAGATE grid
chown -R grid:dba /home/grid
oracle 用户及属性
mkdir /home/oracle
mkuser id=301 admin=true pgrp=dba groups=dba admgroups=dba home=/home/oracle shell=/usr/bin/ksh oracle
chuser capabilities=CAP_NUMA_ATTACH,CAP_BYPASS_RAC_VMM,CAP_PROPAGATE oracle
chown -R oracle:dba /home/oracle
2.7.修改grid,oracle用户密码,及profile
passwd grid
passwd oracle
用户grid .profile
export PS1="`/usr/bin/hostname`:\$PWD\$ "
export PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:$HOME/bin:/usr/bin/X11:/sbin:.
export ORACLE_BASE=/oracle/19c
export ORACLE_HOME=/oracle/grid
export PATH=$ORACLE_HOME/bin:$PATH:.
export ORACLE_SID=+ASM1 # +ASM2 ## on node2
export TEMP=$ORACLE_BASE/tmp
export LIBPATH=$ORACLE_HOME/lib:$LIBPATH
用户oracle .profile
export PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:$HOME/bin:/usr/bin/X11:/sbin:.:/usr/vacpp/bin
export LIBPATH=/usr/lib:/usr/vacpp/lib:/usr/ccs/lib:/usr/lpp/xlf/lib:/usr/lib64:.
export ORACLE_BASE=/oracle/19cdb
export ORACLE_HOME=/oracle/db
export PATH=$ORACLE_HOME/bin:$PATH:.
export ORACLE_SID=orcl1 # orcl2 ## on node2
export LDR_CNTRL=TEXTPSIZE=64K@DATAPSIZE=64K@STACKPSIZE=64K@SHMPSIZE=64K
export VMM_CNTRL=vmm_fork_policy=COR
#export MALLOCTYPE=watson
#export MALLOCOPTIONS=pool,multiheap:4,no_mallinfo
#export MALLOCOPTIONS=threadcache:off,pool,watson,multiheap:32,considersize
##export LDR_PRELOAD64=/usr/lib/libtb64.a
export LIBPATH=$LIBPATH:.
#export AIXTHREAD_SCOPE=S
export CLASSPATH=$ORACLE_HOME/jre:$ORACLE_HOME/rdbms/jlib:$ORACLE_HOME/network/jlib
#export NLS_LANG="SIMPLIFIED CHINESE_CHINA.ZHS16GBK"
export NLS_DATE_FORMAT='YYYY-MM-DD HH24:MI:SS'
#ulimit -s 4194304
#ulimit -n 409600
export TEMP=$ORACLE_BASE/tmp
umask 022
echo grid:rootroot|chpasswd -c
echo oracle:rootroot|chpasswd -c
2.8.创建Oracle软件安装存放所需文件系统
chfs -a size=10G /home
chfs -a size=10G /tmp
chfs -a size=10G /usr
chfs -a size=5G /opt
chfs -a size=5G /var
chfs -a size=5G /
mkvg -S -s 256m -y ora18csoftvg hdisk2
通过smit fs创建:# smit fs
或者
使用如下命令行创建:
mkdir -p /oracle
mklv -e x -t jfs2 -y lvora -U oracle -G dba ora18csoftvg 150
mklv -e x -t jfs2log -y lvoralog -U oracle -G dba ora18csoftvg 1
crfs -v jfs2 -a logname=lvoralog -a agblksize=4096 -a ea=v2 -A yes -d lvora -m /oracle -p rw -t no
mount /dev/lvora /oracle
chfs -a size=60G /oracle ##Oracle 18C RAC软件要求40 GB以上磁盘空间
chfs -a size=5G /tmp
mkdir -p /oracle/19c/tmp
mkdir -p /oracle/19cdb/tmp
chown -R oracle:dba /oracle
chmod -R 777 /oracle
grid用户
GI安装
$ xhost + 192.168.88.8
xhost + 192.168.88.9
##two node running in root :
sh rootpre.sh
./runInstaller
目录
/opt/app/ocr1/ocr
/opt/app/ocr2/ocr
/opt/app/ocr3/ocr
/opt/app/vot1/vot
/opt/app/vot2/vot
/opt/app/vot3/vot
aix7.1执行root.sh脚本ohasd failed to start
然后查询节点一和节点二/etc/inittab内容,果然存在该信息:
[root@node1 bin]# grep install /etc/inittab
install_assist:2:wait:/usr/sbin/install_assist </dev/console >/dev/console 2>&1
install_assist是系统的安装助手,是交互式工具,即假如没有响应,则会一直等待,那么在该行后面的命令将不会被执行,也就是说rc2.d(默认运行级别为2)下的服务将不会被启动,这也就是无法启动ohasd服务的真凶。
问题解决
将/etc/inittab里面的install_assist的一行注释掉或清理掉,重启系统,然后重新执行root.sh,数据库顺利安装。
[root@node1 bin]# grep install /etc/inittab
#install_assist:2:wait:/usr/sbin/install_assist </dev/console >/dev/console 2>&1
Oracle用户
database软件安装
##two node running in root :
sh rootpre.sh
$ xhost + 192.168.88.8
./runInstaller
dbca创建数据库
Select the “Use Oracle-Managed Files” option and enter “/opt/app” as the database location, then click the “Next” button
目录
/opt/app/data1/
/opt/app/data2/
/opt/app/data3/




