Centos 6.5 安裝 Oracle 11g R2 on vbox

Centos 6.5 安裝Oracle 11g R2 on vbox

由於上一篇的rac安裝,截圖較多,這一篇選擇以txt的方式敘述,另外上一篇的時間比較久遠,這裡最近從新安裝

--2018-10-29

1 os環境初始化

[[email protected] yum.repos.d]# lsb_release -a
LSB Version: :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch
Distributor ID: CentOS
Description: CentOS release 6.5 (Final)
Release: 6.5
Codename: Final
[root@rac1 yum.repos.d]# uname -r
2.6.32-431.el6.x86_64
[[email protected] ~]# ls -l /dev/cdrom |grep cdrom
lrwxrwxrwx. 1 root root 3 Oct 26 2018 /dev/cdrom -> sr0
[[email protected] ~]# mount -t iso9660 /dev/cdrom /mnt/
mount: block device /dev/sr0 is write-protected, mounting read-only
[root@rac1 ~]# cd /etc/yum.repos.d/
[[email protected] yum.repos.d]# mv CentOS-Base.repo CentOS-Base.repo.bk
[[email protected] yum.repos.d]# vim public-yum-ol6.repo
[[email protected] yum.repos.d]# cat public-yum-ol6.repo
[ol6_latest]
name=CentOS6 $releasever Latest ($basearch)
gpgkey=file:///mnt/RPM-GPG-KEY-CentOS-6
baseurl=file:///mnt
gpgcheck=1
enabled=1
[root@rac1 yum.repos.d]# yum clean all
[[email protected] yum.repos.d]# yum makecache
[[email protected] yum.repos.d]# yum install lrzsz -y

[[email protected] opt]# vi /etc/sysconfig/selinux
[[email protected] opt]# setenforce 0
[[email protected] opt]# service iptables stop
iptables: Setting chains to policy ACCEPT: filter [ OK ]
iptables: Flushing firewall rules: [ OK ]
iptables: Unloading modules: [ OK ]
[[email protected] opt]# chkconfig iptables off

2 oracle環境初始化

[[email protected] opt]# /usr/sbin/groupadd -g 1000 oinstall
[[email protected] opt]# /usr/sbin/groupadd -g 1020 asmadmin
[[email protected] opt]# /usr/sbin/groupadd -g 1021 asmdba
[[email protected] opt]# /usr/sbin/groupadd -g 1022 asmoper
[[email protected] opt]# /usr/sbin/groupadd -g 1031 dba
[[email protected] opt]# /usr/sbin/groupadd -g 1032 oper
[[email protected] opt]# useradd -u 1100 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
[[email protected] opt]# useradd -u 1101 -g oinstall -G dba,asmdba,oper oracle
[[email protected] opt]# mkdir -p /u01/app/11.2.0/grid
[[email protected] opt]# mkdir -p /u01/app/grid
[[email protected] opt]# mkdir /u01/app/oracle
[[email protected] opt]# chown -R grid:oinstall /u01
[[email protected] opt]# chown oracle:oinstall /u01/app/oracle
[[email protected] opt]# chmod -R 775 /u01/
[[email protected] ~]# passwd grid
[[email protected] ~]# passwd oracle
[[email protected] opt]# grep MemTotal /proc/meminfo
MemTotal: 3088656 kB
[[email protected] opt]# vi /etc/sysctl.conf
[[email protected] opt]# sysctl -p
[[email protected] opt]# vim /etc/security/limits.conf
[[email protected] opt]# vi /etc/pam.d/login

[[email protected] yum.repos.d]# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
[[email protected] yum.repos.d]# echo "DNS1=8.8.8.8" >> /etc/sysconfig/network-scripts/ifcfg-eth0

yum install gcc gcc-c++ libaio* glibc* glibc-devel* ksh libgcc* libstdc++* libstdc++-devel* make sysstat \
unixODBC* compat-libstdc++-33.x86_64 elfutils-libelf-devel glibc.i686 compat-libcap1 smartmontools unzip openssh* parted cvuqdisk -y
[[email protected] ~]# yum install ntpdate -y
[[email protected] ~]# ntpdate time.windows.com
[[email protected] ~]# date
yum install xterm xclock -y

3 ip規劃

[[email protected] ~]# vim /etc/hosts
#Public IP
10.15.7.11 rac1
10.15.7.12 rac2
#Private IP
172.168.1.18 rac1priv
172.168.1.19 rac2priv
#Virtual IP
10.15.7.13 rac1vip
10.15.7.14 rac2vip
#Scan IP
10.15.7.15 racscan
[[email protected] ~]# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=rac1
GATEWAY=10.15.4.1
[[email protected] ~]# vim .bash_profile
umask 022
DISPLAY=10.15.7.115:0.0 ; export DISPLAY
HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S "
export HISTTIMEFORMAT
[[email protected] ~]# source .bash_profile

4 grid和oracle 的bash檔案

[[email protected] ~]$ cat .bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi

# User specific environment and startup programs

PATH=$PATH:$HOME/bin

export PATH
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
umask 022
[[email protected] ~]# su - oracle
[[email protected] ~]$ vi .bash_profile
[[email protected] ~]# cat /home/oracle/.bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi

# User specific environment and startup programs

PATH=$PATH:$HOME/bin

export PATH

export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=bol1
export ORACLE_UNQNAME=bol
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export TNS_ADMIN=$ORACLE_HOME/network/admin
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

$ source .bash_profile  # 使配置檔案生效
[[email protected] ~]# source /home/grid/.bash_profile
[[email protected] ~]# source /home/oracle/.bash_profile

5 克隆主機rac1並修改

克隆並配置虛擬主機rac2 或者重新配置rac2
[[email protected] ~]# cat /home/grid/.bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi

# User specific environment and startup programs

PATH=$PATH:$HOME/bin
export PATH
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
umask 022
export PATH
[[email protected] ~]# cat /home/oracle/.bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi

# User specific environment and startup programs

PATH=$PATH:$HOME/bin
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=bol2
export ORACLE_UNQNAME=bol
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export TNS_ADMIN=$ORACLE_HOME/network/admin
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export PATH

6 配置ssh等效性

各節點生成Keys:
[[email protected] ~]# su - oracle
[[email protected] ~]$ mkdir ~/.ssh
[[email protected] ~]$ chmod 700 ~/.ssh
[[email protected] ~]$ ssh-keygen -t rsa
[[email protected] ~]$ ssh-keygen -t dsa
[[email protected] ~]# su - oracle
[[email protected] ~]$ mkdir ~/.ssh
[[email protected] ~]$ chmod 700 ~/.ssh
[[email protected] ~]$ ssh-keygen -t rsa
[[email protected] ~]$ ssh-keygen -t dsa
在節點1上進行互信配置:
[[email protected] ~]$ touch ~/.ssh/authorized_keys
[[email protected] ~]$ cd ~/.ssh
[[email protected] .ssh]$ ssh rac1 cat ~/.ssh/id_rsa.pub >> authorized_keys
[[email protected] .ssh]$ ssh rac2 cat ~/.ssh/id_rsa.pub >> authorized_keys
[[email protected] .ssh]$ ssh rac1 cat ~/.ssh/id_dsa.pub >> authorized_keys
[[email protected] .ssh]$ ssh rac2 cat ~/.ssh/id_dsa.pub >> authorized_keys
在rac1把儲存公鑰資訊的驗證檔案傳送到rac2上
[[email protected] .ssh]$ pwd
/home/oracle/.ssh
[[email protected] .ssh]$ scp authorized_keys [email protected]:/home/oracle/.ssh/.

設定驗證檔案的許可權
在每一個節點執行:
$ chmod 600 ~/.ssh/authorized_keys

啟用使用者一致性
在你要執行OUI的節點以oracle使用者執行(這裡選擇rac1):
[[email protected] .ssh]$ exec /usr/bin/ssh-agent $SHELL
[[email protected] .ssh]$ ssh-add
Identity added: /home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)
Identity added: /home/oracle/.ssh/id_dsa (/home/oracle/.ssh/id_dsa)

驗證ssh配置是否正確
以oracle使用者在所有節點分別執行:
ssh rac1 date
ssh rac2 date
ssh rac1priv date
ssh rac2priv date
===========
[[email protected] .ssh]$ su - grid
Password:
[[email protected] ~]$ mkdir ~/.ssh
[[email protected] ~]$ ssh-keygen -t rsa
[[email protected] ~]$ ssh-keygen -t dsa
[[email protected] ~]$ chmod 700 ~/.ssh

[[email protected] .ssh]$ su - grid
Password:
[[email protected] ~]$ mkdir ~/.ssh
[[email protected] ~]$ ssh-keygen -t rsa
[[email protected] ~]$ ssh-keygen -t dsa
[[email protected] ~]$ chmod 700 ~/.ssh

[[email protected] ~]$ touch ~/.ssh/authorized_keys
[[email protected] ~]$ cd ~/.ssh
[[email protected] .ssh]$ ssh rac1 cat ~/.ssh/id_rsa.pub >> authorized_keys
[[email protected] .ssh]$ ssh rac2 cat ~/.ssh/id_rsa.pub >> authorized_keys
[[email protected] .ssh]$ ssh rac1 cat ~/.ssh/id_dsa.pub >> authorized_keys
[[email protected] .ssh]$ ssh rac2 cat ~/.ssh/id_dsa.pub >> authorized_keys
[[email protected] .ssh]$ scp authorized_keys [email protected]:/home/grid/.ssh/.
[[email protected] .ssh]$ exec /usr/bin/ssh-agent $SHELL
[[email protected] .ssh]$ ssh-add
Identity added: /home/grid/.ssh/id_rsa (/home/grid/.ssh/id_rsa)
Identity added: /home/grid/.ssh/id_dsa (/home/grid/.ssh/id_dsa)

[[email protected] .ssh]$
ssh rac1 date
ssh rac2 date
ssh rac1priv date
ssh rac2priv date

[[email protected] .ssh]$
ssh rac1 date
ssh rac2 date
ssh rac1priv date
ssh rac2priv date

7 配置共享儲存

udev方式
[[email protected] ~]# cat /etc/issue
CentOS release 6.5 (Final)
Kernel \r on an \m
[[email protected] ~]# echo "options=--whitelisted --replace-whitespace" >> /etc/scsi_id.config
[[email protected] ~]# for i in b c d e f g;
> do
> echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\"" >> /etc/udev/rules.d/99-oracle-asmdevices.rules
> done
===
for i in b c d e f g;
do
echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\"" >> /etc/udev/rules.d/99-oracle-asmdevices.rules
done
===
[[email protected] ~]# cat /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBae01435a-3e52303e", NAME="asm-diskb", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB714cf53b-64b30d94", NAME="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBdbf678d2-9ce1dd68", NAME="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBfe506a5f-89a411c1", NAME="asm-diske", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB0b346c6a-a4a60e78", NAME="asm-diskf", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB9a904921-a5958d54", NAME="asm-diskg", OWNER="grid", GROUP="asmadmin", MODE="0660"
[[email protected] ~]# /sbin/start_udev
Starting udev: [ OK ]
[[email protected] ~]# ls -l /dev/asm*
brw-rw---- 1 grid asmadmin 8, 16 Oct 29 08:15 /dev/asm-diskb
brw-rw---- 1 grid asmadmin 8, 32 Oct 29 08:15 /dev/asm-diskc
brw-rw---- 1 grid asmadmin 8, 48 Oct 29 08:15 /dev/asm-diskd
brw-rw---- 1 grid asmadmin 8, 64 Oct 29 08:15 /dev/asm-diske
brw-rw---- 1 grid asmadmin 8, 80 Oct 29 08:15 /dev/asm-diskf
brw-rw---- 1 grid asmadmin 8, 96 Oct 29 08:15 /dev/asm-diskg

[[email protected] ~]# ls -l /dev/asm*
brw-rw---- 1 grid asmadmin 8, 16 Oct 29 08:19 /dev/asm-diskb
brw-rw---- 1 grid asmadmin 8, 32 Oct 29 08:19 /dev/asm-diskc
brw-rw---- 1 grid asmadmin 8, 48 Oct 29 08:19 /dev/asm-diskd
brw-rw---- 1 grid asmadmin 8, 64 Oct 29 08:19 /dev/asm-diske
brw-rw---- 1 grid asmadmin 8, 80 Oct 29 08:19 /dev/asm-diskf
brw-rw---- 1 grid asmadmin 8, 96 Oct 29 08:19 /dev/asm-diskg

8 安裝gird

[[email protected] opt]# unzip p13390677_112040_Linux-x86-64_3of7.zip

/usr/bin/xterm -ls -display $DISPLAY
[INS-41104] 存在找不到 private 網絡卡的問題,
幾番試過之後,把eth1刪除,然後重啟,ifconfig找到eth1的hwaddr,cp eth0 eth1 ,並修改hwaddr和ipaddr,device,name,註釋uuid
結果如下
===
[[email protected] ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
UUID=7e047ed1-c530-478f-8704-65565ac802ab
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=none
HWADDR=08:00:27:0E:DE:0E
IPADDR=10.15.7.11
PREFIX=22
GATEWAY=10.15.4.1
DNS1=8.8.8.8
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth0"

[[email protected] ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
TYPE=Ethernet
#UUID=7e047ed1-c530-478f-8704-65565ac802ab
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=none
HWADDR=08:00:27:8A:F5:38
IPADDR=1.1.1.1
PREFIX=22
GATEWAY=10.15.4.1
DNS1=8.8.8.8
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth1"

[[email protected] ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
UUID=288aa282-1d1f-4578-bd6a-1b45a48892b1
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=none
HWADDR=08:00:27:33:6D:5A
IPADDR=10.15.7.12
PREFIX=22
GATEWAY=10.15.4.1
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth0"
DNS1=8.8.8.8
[[email protected] ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
TYPE=Ethernet
#UUID=288aa282-1d1f-4578-bd6a-1b45a48892b1
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=none
HWADDR=08:00:27:DC:E4:01
IPADDR=1.1.1.2
PREFIX=22
GATEWAY=10.15.4.1
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth1"
DNS1=8.8.8.8

=======
[[email protected] ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.15.7.11 rac1
1.1.1.1 rac1priv
10.15.7.13 rac1vip
10.15.7.12 rac2
1.1.1.2 rac2priv
10.15.7.14 rac2vip
10.15.7.15 scanip

兩節點進行ping測試,分別ping“公有IP”和“私有IP”,兩節點ping通正常後,按如下方式配置:
[[email protected] ~]# ping 1.1.1.2
[[email protected] ~]# ping 10.15.7.12
[[email protected] ~]# ping rac2priv
[[email protected] ~]# ping 1.1.1.1
[[email protected] ~]# ping 10.15.7.11
[[email protected] ~]# ping rac1priv

ssh rac1 date
ssh rac2 date
ssh rac1priv date
ssh rac2priv date

# yum install -y ksh
[[email protected] opt]# rpm -ivh --nodeps pdksh-5.2.14-36.el5.i386.rpm
[[email protected] ~]# yum list|grep ksh
ksh.x86_64 20120801-37.el6_9 @base
pdksh.i386 5.2.14-36.el5 installed
mksh.x86_64 39-7.el6_4.1 l6_latest

[[email protected] rpm]# rpm -ivh cvuqdisk-1.0.9-1.rpm
[[email protected] ~]# vi /etc/sysctl.conf
#ORACLE SETTING
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048586

在rac1中執行指令碼
[[email protected] rpm]# /u01/app/oraInventory/orainstRoot.sh
[[email protected] rpm]# /u01/app/11.2.0/grid/root.sh
在rac2執行指令碼
[[email protected] grid]# /u01/app/oraInventory/orainstRoot.sh
[[email protected] grid]# /u01/app/11.2.0/grid/root.sh

[[email protected] ~]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[[email protected] ~]# /u01/app/11.2.0/grid/root.sh
Performing root user operation for Oracle 11g

The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/11.2.0/grid

Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...


Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Creating trace directory
User ignored Prerequisites during installation
Installing Trace File Analyzer
OLR initialization - successful
root wallet
root wallet cert
root cert export
peer wallet
profile reader wallet
pa wallet
peer wallet keys
pa wallet keys
peer cert request
pa cert request
peer cert
pa cert
peer root cert TP
profile reader root cert TP
pa root cert TP
peer pa cert TP
pa peer cert TP
profile reader pa cert TP
profile reader peer cert TP
peer user cert
pa user cert
Adding Clusterware entries to upstart
CRS-2672: Attempting to start 'ora.mdnsd' on 'rac1'
CRS-2676: Start of 'ora.mdnsd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'rac1'
CRS-2676: Start of 'ora.gpnpd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'rac1'
CRS-2672: Attempting to start 'ora.gipcd' on 'rac1'
CRS-2676: Start of 'ora.cssdmonitor' on 'rac1' succeeded
CRS-2676: Start of 'ora.gipcd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'rac1'
CRS-2672: Attempting to start 'ora.diskmon' on 'rac1'
CRS-2676: Start of 'ora.diskmon' on 'rac1' succeeded
CRS-2676: Start of 'ora.cssd' on 'rac1' succeeded

ASM created and started successfully.

Disk Group OCR created successfully.

clscfg: -install mode specified
Successfully accumulated necessary OCR keys.
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
CRS-4256: Updating the profile
Successful addition of voting disk 62cb5cc42bdc4ff8bfd1069706eeed4b.
Successfully replaced voting disk group with +OCR.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 62cb5cc42bdc4ff8bfd1069706eeed4b (/dev/asm-diskb) [OCR]
Located 1 voting disk(s).
CRS-2672: Attempting to start 'ora.asm' on 'rac1'
CRS-2676: Start of 'ora.asm' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.OCR.dg' on 'rac1'
CRS-2676: Start of 'ora.OCR.dg' on 'rac1' succeeded
Configure Oracle Grid Infrastructure for a Cluster ... succeeded

[[email protected] ~]# su - grid
[[email protected] ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[[email protected] ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac1
ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac1
ora.OCR.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1
ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac1
ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE
ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1
ora.oc4j ora.oc4j.type 0/1 0/2 ONLINE ONLINE rac1
ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac2
ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac1

[[email protected] ~]$ olsnodes -n #檢視叢集中節點配置資訊
rac1 1
rac2 2
[[email protected] ~]$ olsnodes -n -i -s -t
rac1 1 rac1vip Active Unpinned
rac2 2 rac2vip Active Unpinned
[[email protected] ~]$ ps -ef|grep lsnr|grep -v 'grep'|grep -v 'ocfs'|awk '{print$9}'
LISTENER_SCAN1
LISTENER
[[email protected] ~]$ srvctl status asm -a
ASM is running on rac2,rac1
ASM is enabled.
[[email protected] ~]$ crsctl query css votedisk #檢視叢集件的表決磁碟資訊
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 62cb5cc42bdc4ff8bfd1069706eeed4b (/dev/asm-diskb) [OCR]
Located 1 voting disk(s).

9 asmca

只在節點rac1執行即可
進入grid使用者下
[[email protected] ~]# su - grid
利用asmca
[[email protected] ~]$ asmca

具體安裝截圖可參看上一篇rac安裝

10 安裝db

[[email protected] ~]# /u01/app/oracle/product/11.2.0/db_1/root.sh
[[email protected] ~]# /u01/app/oracle/product/11.2.0/db_1/root.sh

11 dbca

[[email protected] ~]# su - oracle
[[email protected] ~]$ dbca

12 RAC維護

[[email protected] ~]$ crs_stat -t
[grid@rac1 ~]$ crsctl check cluster
[[email protected] ~]$ crsctl check crs
[[email protected] ~]$ srvctl status database -d bol
$ olsnodes
$ olsnodes -n
$ olsnodes -n -i -s -t
$ crsctl query css votedisk
$ srvctl config scan #檢視叢集SCAN VIP資訊
[[email protected] ~]$ srvctl config scan
SCAN name: scanip, Network: 1/10.15.4.0/255.255.252.0/eth0
SCAN VIP name: scan1, IP: /scanip/10.15.7.15
[[email protected] ~]$ srvctl config scan_listener #檢視叢集SCAN Listener資訊
SCAN Listener LISTENER_SCAN1 exists. Port: TCP:1521

12.1 啟、停叢集資料庫


整個叢集的資料庫啟停
進入grid使用者
[[email protected] ~]$ srvctl stop database -d bol
[[email protected] ~]$ srvctl start database -d bol
[[email protected] ~]$ srvctl status database -d bol
關閉所有節點
進入root使用者
關閉所有節點
[[email protected] bin]# pwd
/u01/app/11.2.0/grid/bin
[[email protected] bin]# ./crs_stat -t -v #確認叢集各項資源和服務
[[email protected] bin]# ./crsctl stop crs #實際只關閉了當前結點
[[email protected] bin]# ./crsctl stop cluster -all #關閉叢集
[[email protected] bin]# ./crs_stat -t -v
開啟
[[email protected] bin]# ./crs_stat -t -v
[[email protected] bin]# ./crsctl start cluster -all
[[email protected] bin]# ./srvctl status database -d bol
[[email protected] bin]# ./srvctl start database -d bol
[[email protected] bin]# ./emctl start bol #開啟OEM

所有例項和服務的狀態
[[email protected] ~]$ srvctl status database -d bol
Instance bol1 is running on node rac1
Instance bol2 is running on node rac2
單個例項的狀態
[[email protected] ~]$ srvctl status instance -d bol -i bol1
Instance bol1 is running on node rac1
列出配置的所有資料庫
[[email protected] ~]$ srvctl config database
bol
特定節點上節點應用程式的狀態
[[email protected] ~]$ srvctl status nodeapps -n rac1
VIP rac1vip is enabled
VIP rac1vip is running on node: rac1
Network is enabled
Network is running on node: rac1
GSD is disabled
GSD is not running on node: rac1
ONS is enabled
ONS daemon is running on node: rac1
ASM 例項的狀態
[[email protected] ~]$ srvctl status asm -n rac1
ASM is running on rac1
顯示 RAC 資料庫的配置
[[email protected] ~]$ srvctl config database -d bol
Database unique name: bol
Database name: bol
Oracle home: /u01/app/oracle/product/11.2.0/db_1
Oracle user: oracle
Spfile: +DATA/bol/spfilebol.ora
Domain:
Start options: open
Stop options: immediate
Database role: PRIMARY
Management policy: AUTOMATIC
Server pools: bol
Database instances: bol1,bol2
Disk Groups: DATA,FRA
Mount point paths:
Services:
Type: RAC
Database is administrator managed
顯示節點應用程式的配置 —(VIP、GSD、ONS、監聽器)
[[email protected] ~]$ srvctl config nodeapps -n rac1 -a -g -s -l
-n <node_name> option has been deprecated.
Warning:-l option has been deprecated and will be ignored.
Network exists: 1/10.15.4.0/255.255.252.0/eth0, type static
VIP exists: /rac1vip/10.15.7.13/10.15.4.0/255.255.252.0/eth0, hosting node rac1
GSD exists
ONS exists: Local port 6100, remote port 6200, EM port 2016
Warning:-n option has been deprecated and will be ignored.
Name: LISTENER
Network: 1, Owner: grid
Home: <CRS home>
/u01/app/11.2.0/grid on node(s) rac1,rac2
End points: TCP:1521
顯示 ASM 例項的配置
[[email protected] ~]$ srvctl config asm -n rac1
Warning:-n option has been deprecated and will be ignored.
ASM home: /u01/app/11.2.0/grid
ASM listener: LISTENER

叢集中所有正在執行的例項
SELECT
inst_id
, instance_number inst_no
, instance_name inst_name
, parallel
, status
, database_status db_status
, active_state state
, host_name host
FROM gv$instance
ORDER BY inst_id;

1 1 bol1 YES OPEN ACTIVE NORMAL rac1
2 2 bol2 YES OPEN ACTIVE NORMAL rac2

位於磁碟組中的所有資料檔案
select name from v$datafile
union
select member from v$logfile
union
select name from v$controlfile
union
select name from v$tempfile;

屬於“OCR”磁碟組的所有 ASM 磁碟
SELECT path
FROM v$asm_disk
WHERE group_number IN (select group_number
from v$asm_diskgroup
where name = 'OCR');

/dev/asm-diskc
/dev/asm-diskd
/dev/asm-diskb

12.2 建立表空間和使用者


[[email protected] ~]$ asmcmd
ASMCMD> ls
DATA/
FRA/
OCR/
ASMCMD> pwd
+DATA/BOL/DATAFILE

#on scan_ip
$ sqlplus "/as sysdba"
create tablespace test datafile '+DATA/BOL/DATAFILE/test01.dbf' size 50m ;
create tablespace SDE_TBS
logging
datafile '+DATA/BOL/DATAFILE/SDE_TBS.dbf'
size 500m
autoextend on
next 200m maxsize 20480m
extent management local;
create user SDE identified by sde default tablespace SDE_TBS;
grant connect,resource,dba to sde;

12.3 單節點關閉和啟動

SQL> show parameter name;

NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
cell_offloadgroup_name string
db_file_name_convert string
db_name string bol
db_unique_name string bol
global_names boolean FALSE
instance_name string bol1
lock_name_space string
log_file_name_convert string
processor_group_name string
service_names string bol
SQL> select instance_name,status from gv$instance;

INSTANCE_NAME STATUS
---------------- ------------
bol1 OPEN
bol2 OPEN

[[email protected] ~]$ srvctl stop listener -n rac1 #關閉監聽
[[email protected] ~]$ srvctl status listener -n rac1 #檢視監聽狀態
Listener LISTENER is enabled on node(s): rac1
Listener LISTENER is not running on node(s): rac1
[[email protected] ~]$ ps -ef |grep -i local=no |wc -l
8
[[email protected] ~]$ srvctl stop instance -o immediate -d bol -i bol1 #關閉節點1資料庫
[[email protected] ~]$ srvctl status database -d bol #檢視資料庫狀態
Instance bol1 is not running on node rac1
Instance bol2 is running on node rac2
[[email protected] ~]$ srvctl status instance -d bol -i bol1 #檢視節點1資料庫狀態
Instance bol1 is not running on node rac1

[[email protected] ~]# cd /u01/app/11.2.0/grid/bin/
[[email protected] bin]# ./crsctl stop crs
[[email protected] bin]# ./crs_stat -t -v #檢視狀態
CRS-0184: Cannot communicate with the CRS daemon.
[[email protected] bin]# ./srvctl status asm -n rac1 #在rac1節點上執行
PRKH-1010 : Unable to communicate with CRS services.
PRKH-3003 : An attempt to communicate with the CSS daemon failed
[[email protected] bin]# ./srvctl status asm -n rac1 #在rac2節點上執行
ASM is not running on rac1

$ ps -ef |grep -i ora
$ ps -ef |grep -i asm
#登入scan ip節點,檢視連線的例項
select instance_name,status from gv$instance;

#開啟rac1
[[email protected] bin]# ./crs_stat -t
CRS-0184: Cannot communicate with the CRS daemon.
[[email protected] bin]# ./crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
[[email protected] bin]# ./srvctl status asm #確保ASM服務已經執行在兩個節點上
ASM is running on rac2,rac1
[[email protected] bin]# ./crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora.DATA.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
ora.FRA.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac2
ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac2
ora.OCR.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1
ora.bol.db ora....se.type 0/2 0/1 ONLINE ONLINE rac2
ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac2
ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE
ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1
ora.oc4j ora.oc4j.type 0/1 0/2 ONLINE ONLINE rac2
ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
ora....C1.lsnr application 0/5 0/0 OFFLINE OFFLINE
ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac2
ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac2
[[email protected] ~]$ srvctl start instance -d bol -i bol1 #開啟叢集節點一資料庫例項
[[email protected] ~]$ srvctl status database -d bol
Instance bol1 is running on node rac1
Instance bol2 is running on node rac2
[[email protected] ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.DATA.dg ora....up.type ONLINE ONLINE rac1
ora.FRA.dg ora....up.type ONLINE ONLINE rac1
ora....ER.lsnr ora....er.type ONLINE ONLINE rac1
ora....N1.lsnr ora....er.type ONLINE ONLINE rac2
ora.OCR.dg ora....up.type ONLINE ONLINE rac1
ora.asm ora.asm.type ONLINE ONLINE rac1
ora.bol.db ora....se.type ONLINE ONLINE rac1
ora.cvu ora.cvu.type ONLINE ONLINE rac2
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE rac1
ora.oc4j ora.oc4j.type ONLINE ONLINE rac2
ora.ons ora.ons.type ONLINE ONLINE rac1
ora....SM1.asm application ONLINE ONLINE rac1
ora....C1.lsnr application ONLINE ONLINE rac1
ora.rac1.gsd application OFFLINE OFFLINE
ora.rac1.ons application ONLINE ONLINE rac1
ora.rac1.vip ora....t1.type ONLINE ONLINE rac1
ora....SM2.asm application ONLINE ONLINE rac2
ora....C2.lsnr application ONLINE ONLINE rac2
ora.rac2.gsd application OFFLINE OFFLINE
ora.rac2.ons application ONLINE ONLINE rac2
ora.rac2.vip ora....t1.type ONLINE ONLINE rac2
ora.scan1.vip ora....ip.type ONLINE ONLINE rac2

select value from v$parameter where name='processes';

12.4 網路檢視

[[email protected] bin]# ifconfig
eth0 Link encap:Ethernet HWaddr 08:00:27:0E:DE:0E
inet addr:10.15.7.11 Bcast:10.15.7.255 Mask:255.255.252.0
inet6 addr: fe80::a00:27ff:fe0e:de0e/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:3907156 errors:0 dropped:0 overruns:0 frame:0
TX packets:7161180 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:536184469 (511.3 MiB) TX bytes:9769872694 (9.0 GiB)

eth0:1 Link encap:Ethernet HWaddr 08:00:27:0E:DE:0E
inet addr:10.15.7.13 Bcast:10.15.7.255 Mask:255.255.252.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

eth1 Link encap:Ethernet HWaddr 08:00:27:8A:F5:38
inet addr:1.1.1.1 Bcast:1.1.3.255 Mask:255.255.252.0
inet6 addr: fe80::a00:27ff:fe8a:f538/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:3636608 errors:0 dropped:0 overruns:0 frame:0
TX packets:3017320 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:2524862742 (2.3 GiB) TX bytes:1669614220 (1.5 GiB)

eth1:1 Link encap:Ethernet HWaddr 08:00:27:8A:F5:38
inet addr:169.254.204.165 Bcast:169.254.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:2272848 errors:0 dropped:0 overruns:0 frame:0
TX packets:2272848 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:1524528859 (1.4 GiB) TX bytes:1524528859 (1.4 GiB)
[[email protected] bin]# ifconfig
eth0 Link encap:Ethernet HWaddr 08:00:27:33:6D:5A
inet addr:10.15.7.12 Bcast:10.15.7.255 Mask:255.255.252.0
inet6 addr: fe80::a00:27ff:fe33:6d5a/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:8414792 errors:0 dropped:0 overruns:0 frame:0
TX packets:1577797 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:9466367813 (8.8 GiB) TX bytes:172203059 (164.2 MiB)

eth0:1 Link encap:Ethernet HWaddr 08:00:27:33:6D:5A
inet addr:10.15.7.14 Bcast:10.15.7.255 Mask:255.255.252.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

eth0:3 Link encap:Ethernet HWaddr 08:00:27:33:6D:5A
inet addr:10.15.7.15 Bcast:10.15.7.255 Mask:255.255.252.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

eth1 Link encap:Ethernet HWaddr 08:00:27:DC:E4:01
inet addr:1.1.1.2 Bcast:1.1.3.255 Mask:255.255.252.0
inet6 addr: fe80::a00:27ff:fedc:e401/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:3017568 errors:0 dropped:0 overruns:0 frame:0
TX packets:3637072 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1669901139 (1.5 GiB) TX bytes:2525032018 (2.3 GiB)

eth1:1 Link encap:Ethernet HWaddr 08:00:27:DC:E4:01
inet addr:169.254.202.95 Bcast:169.254.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:848199 errors:0 dropped:0 overruns:0 frame:0
TX packets:848199 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:472739764 (450.8 MiB) TX bytes:472739764 (450.8 MiB)