[BEGIN] 2016/10/5 10:32:31
[c:\~]$ ssh 192.168.1.155
Connecting to 192.168.1.155:22...
Connection established.
To escape to local shell, press 'Ctrl+Alt+]'.
Last login: Wed Oct 5 18:30:56 2016 from 192.168.1.1
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# cat /etc/hosts
127.0.0.1 localhost
192.168.1.155 node1
192.168.1.156 node2
192.168.1.157 node1-vip
192.168.1.158 node2-vip
192.168.1.159 node-scan
10.10.5.155 node1-priv
10.10.5.156 node2-priv
[root@node1 ~]# su - oragrid
[oragrid@node1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.ARCH.dg ora....up.type ONLINE ONLINE node1
ora.DATA.dg ora....up.type ONLINE ONLINE node1
ora....ER.lsnr ora....er.type ONLINE ONLINE node1
ora....N1.lsnr ora....er.type ONLINE ONLINE node2
ora.OCRVOTE.dg ora....up.type ONLINE ONLINE node1
ora.asm ora.asm.type ONLINE ONLINE node1
ora.cvu ora.cvu.type ONLINE ONLINE node1
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE node1
ora....SM1.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application OFFLINE OFFLINE
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip ora....t1.type ONLINE ONLINE node1
ora....SM2.asm application ONLINE ONLINE node2
ora....E2.lsnr application ONLINE ONLINE node2
ora.node2.gsd application OFFLINE OFFLINE
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip ora....t1.type ONLINE ONLINE node2
ora.oc4j ora.oc4j.type ONLINE ONLINE node1
ora.ons ora.ons.type ONLINE ONLINE node1
ora.orcc.db ora....se.type ONLINE ONLINE node1
ora....ry.acfs ora....fs.type ONLINE ONLINE node1
ora.scan1.vip ora....ip.type ONLINE ONLINE node2
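Note: crs_stat -t is deprecated in 11gR2 and kept only for backward compatibility; the same resource view is also available via crsctl, for example:
crsctl stat res -t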
[oragrid@node1 ~]$ crsctl check cluster -all
**************************************************************
node1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[oragrid@node1 ~]$ multipath -ll
need to be root
[oragrid@node1 ~]$ exit
logout
[root@node1 ~]# multipath -ll
arch (14f504e46494c4500764e577843332d547243472d37417072) dm-4 OPNFILER,VIRTUAL-DISK
size=8.3G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 37:0:0:0 sdg 8:96 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 38:0:0:0 sdh 8:112 active ready running
vote_ocr (14f504e46494c45004932325a59732d757266762d4f486c59) dm-5 OPNFILER,VIRTUAL-DISK
size=5.6G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:0 sdd 8:48 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:0 sdc 8:32 active ready running
data (14f504e46494c45007469355367682d513879662d6d344836) dm-3 OPNFILER,VIRTUAL-DISK
size=11G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 36:0:0:0 sdf 8:80 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 35:0:0:0 sde 8:64 active ready running
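Note: the WWID shown in parentheses for each multipath device can be cross-checked against its member paths with scsi_id (the same call used in the udev rules further down), e.g.:
/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sdg    # prints the WWID of "arch" (14f504e46494c4500764e577843332d547243472d37417072)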
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# fdisk -l
Disk /dev/sda: 53.7 GB, 53687091200 bytes
255 heads, 63 sectors/track, 6527 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x0002d1a4
Device Boot Start End Blocks Id System
/dev/sda1 * 1 256 2048000 83 Linux
Partition 1 does not end on cylinder boundary.
/dev/sda2 256 1275 8192000 82 Linux swap / Solaris
/dev/sda3 1275 6528 42187776 8e Linux LVM
Disk /dev/sdb: 21.5 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0xf289c154
Device Boot Start End Blocks Id System
/dev/sdb1 1 2610 20964793+ 8e Linux LVM
Disk /dev/mapper/vg01-LogVol00: 20.3 GB, 20329791488 bytes
255 heads, 63 sectors/track, 2471 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/mapper/vg01-Log_ora: 15.7 GB, 15728640000 bytes
255 heads, 63 sectors/track, 1912 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/mapper/vg01-LogVol02: 15.7 GB, 15728640000 bytes
255 heads, 63 sectors/track, 1912 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/sdc: 6006 MB, 6006243328 bytes
185 heads, 62 sectors/track, 1022 cylinders
Units = cylinders of 11470 * 512 = 5872640 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/sdd: 6006 MB, 6006243328 bytes
185 heads, 62 sectors/track, 1022 cylinders
Units = cylinders of 11470 * 512 = 5872640 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/sde: 11.8 GB, 11844714496 bytes
64 heads, 32 sectors/track, 11296 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/sdf: 11.8 GB, 11844714496 bytes
64 heads, 32 sectors/track, 11296 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/sdg: 8959 MB, 8959033344 bytes
64 heads, 32 sectors/track, 8544 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/sdh: 8959 MB, 8959033344 bytes
64 heads, 32 sectors/track, 8544 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/mapper/data: 11.8 GB, 11844714496 bytes
255 heads, 63 sectors/track, 1440 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/mapper/arch: 8959 MB, 8959033344 bytes
255 heads, 63 sectors/track, 1089 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/mapper/vote_ocr: 6006 MB, 6006243328 bytes
255 heads, 63 sectors/track, 730 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
[root@node1 ~]# lsscsi
[2:0:0:0] disk VMware, VMware Virtual S 1.0 /dev/sda
[2:0:1:0] disk VMware, VMware Virtual S 1.0 /dev/sdb
[4:0:0:0] cd/dvd NECVMWar VMware SATA CD01 1.00 /dev/sr0
[33:0:0:0] disk OPNFILER VIRTUAL-DISK 0 /dev/sdc
[34:0:0:0] disk OPNFILER VIRTUAL-DISK 0 /dev/sdd
[35:0:0:0] disk OPNFILER VIRTUAL-DISK 0 /dev/sde
[36:0:0:0] disk OPNFILER VIRTUAL-DISK 0 /dev/sdf
[37:0:0:0] disk OPNFILER VIRTUAL-DISK 0 /dev/sdg
[38:0:0:0] disk OPNFILER VIRTUAL-DISK 0 /dev/sdh
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# su - oragrid
[oragrid@node1 ~]$ asmcmd
ASMCMD> lsdg
State Type Rebal Sector Block AU Total_MB Free_MB Req_mir_free_MB Usable_file_MB Offline_disks Voting_files Name
MOUNTED EXTERN N 512 4096 1048576 8544 8135 0 8135 0 N ARCH/
MOUNTED EXTERN N 512 4096 1048576 11296 8543 0 8543 0 N DATA/
MOUNTED EXTERN N 512 4096 1048576 5728 5332 0 5332 0 Y OCRVOTE/
ASMCMD> lsdsk
Path
/dev/mapper/arch
/dev/mapper/data
/dev/mapper/vote_ocr
ASMCMD>
ASMCMD>
ASMCMD>
ASMCMD>
ASMCMD> exit
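Note: ASM is still reading the disks through the /dev/mapper names shown by lsdsk; the discovery string it uses can be checked now and changed later from asmcmd. A sketch (the dsset value is simply the udev naming introduced further below):
asmcmd dsget                      # show the current ASM/GPnP discovery string
asmcmd dsset '/dev/asm-disk*'     # example: point it at the udev-managed names once they exist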
[oragrid@node1 ~]$ more /etc/multipath.conf
defaults {
user_friendly_names yes
}
multipaths {
multipath {
wwid 14f504e46494c45004932325a59732d757266762d4f486c59
alias vote_ocr
}
multipath {
wwid 14f504e46494c45007469355367682d513879662d6d344836
alias data
}
multipath {
wwid 14f504e46494c4500764e577843332d547243472d37417072
alias arch
}
}
[oragrid@node1 ~]$
[oragrid@node1 ~]$
[oragrid@node1 ~]$
[oragrid@node1 ~]$
[oragrid@node1 ~]$ cd $ORACLE_HOME/dbs
[oragrid@node1 dbs]$ ls
ab_+ASM1.dat hc_+ASM1.dat init.ora orapw+ASM
[oragrid@node1 dbs]$ ls -l
total 16
-rw-rw---- 1 oragrid oinstall 1053 Oct  5 18:27 ab_+ASM1.dat
-rw-rw---- 1 oragrid oinstall 1544 Oct  5 18:27 hc_+ASM1.dat
-rw-r--r-- 1 oragrid oinstall 2851 May 15  2009 init.ora
-rw-r----- 1 oragrid oinstall 1536 Jul 17 00:17 orapw+ASM
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$ ls -l
total 16
-rw-rw---- 1 oragrid oinstall 1053 Oct  5 18:27 ab_+ASM1.dat
-rw-rw---- 1 oragrid oinstall 1544 Oct  5 18:27 hc_+ASM1.dat
-rw-r--r-- 1 oragrid oinstall 2851 May 15  2009 init.ora
-rw-r----- 1 oragrid oinstall 1536 Jul 17 00:17 orapw+ASM
[oragrid@node1 dbs]$ ls -l
total 16
-rw-rw---- 1 oragrid oinstall 1053 Oct  5 18:27 ab_+ASM1.dat
-rw-rw---- 1 oragrid oinstall 1544 Oct  5 18:27 hc_+ASM1.dat
-rw-r--r-- 1 oragrid oinstall 2851 May 15  2009 init.ora
-rw-r----- 1 oragrid oinstall 1536 Jul 17 00:17 orapw+ASM
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$ cat /etc/multipath.conf
defaults {
user_friendly_names yes
}
multipaths {
multipath {
wwid 14f504e46494c45004932325a59732d757266762d4f486c59
alias vote_ocr
}
multipath {
wwid 14f504e46494c45007469355367682d513879662d6d344836
alias data
}
multipath {
wwid 14f504e46494c4500764e577843332d547243472d37417072
alias arch
}
}
[oragrid@node1 dbs]$ cat /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45004932325a59732d757266762d4f486c59",OWNER="oragrid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45007469355367682d513879662d6d344836",OWNER="oragrid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c4500764e577843332d547243472d37417072",OWNER="oragrid",GROUP="asmadmin",MODE="0660"
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$
[oragrid@node1 dbs]$ chkconfig multipathd off
[error: insufficient privileges, chkconfig must be run as root]
[oragrid@node1 dbs]$ exit
logout
[root@node1 ~]# chkconfig multipathd off
[root@node1 ~]# ssh node2
root@node2's password:
Last login: Sat Jul 16 23:47:59 2016 from 192.168.1.1
[root@node2 ~]# chkconfig multipathd off
[root@node2 ~]# service multipathd stop
Stopping multipathd daemon: [  OK  ]
[root@node2 ~]# exit
logout
Connection to node2 closed.
[root@node1 ~]# service multipathd stop
Stopping multipathd daemon: [  OK  ]
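Note: before relying on the new per-path udev rules, it is worth confirming on both nodes that multipathd is disabled and stopped, e.g.:
chkconfig --list multipathd     # every runlevel should show "off"
service multipathd status       # should report multipathd as stopped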
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# cd /etc/udev/rules.d/
[root@node1 rules.d]# mv 99-oracle-asmdevices.rules 99-oracle-asmdevices.rules.bak
[root@node1 rules.d]#
[root@node1 rules.d]#
[root@node1 rules.d]#
[root@node1 rules.d]# ssh node2
root@node2's password:
Last login: Wed Oct 5 18:36:50 2016 from node1
[root@node2 ~]# cd /etc/udev/rules.d/
[root@node2 rules.d]# mv 99-oracle-asmdevices.rules 99-oracle-asmdevices.rules.bak
[root@node2 rules.d]# exit
logout
Connection to node2 closed.
[root@node1 rules.d]#
[root@node1 rules.d]#
[root@node1 rules.d]# pwd
/etc/udev/rules.d
[root@node1 rules.d]# vi 99-oracle-asmdevices.rules
[root@node1 rules.d]# fdisk -l | grep sd
Disk /dev/sda: 53.7 GB, 53687091200 bytes
/dev/sda1 * 1 256 2048000 83 Linux
/dev/sda2 256 1275 8192000 82 Linux swap / Solaris
/dev/sda3 1275 6528 42187776 8e Linux LVM
Disk /dev/sdb: 21.5 GB, 21474836480 bytes
/dev/sdb1 1 2610 20964793+ 8e Linux LVM
Disk /dev/sdc: 6006 MB, 6006243328 bytes
Disk /dev/sdd: 6006 MB, 6006243328 bytes
Disk /dev/sde: 11.8 GB, 11844714496 bytes
Disk /dev/sdf: 11.8 GB, 11844714496 bytes
Disk /dev/sdg: 8959 MB, 8959033344 bytes
Disk /dev/sdh: 8959 MB, 8959033344 bytes
[root@node1 rules.d]# for i in c d e f g h ;
> do
> echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""
> done
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45004932325a59732d757266762d4f486c59", NAME="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45004932325a59732d757266762d4f486c59", NAME="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45007469355367682d513879662d6d344836", NAME="asm-diske", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45007469355367682d513879662d6d344836", NAME="asm-diskf", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500764e577843332d547243472d37417072", NAME="asm-diskg", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500764e577843332d547243472d37417072", NAME="asm-diskh", OWNER="grid", GROUP="asmadmin", MODE="0660"
[root@node1 rules.d]#
[root@node1 rules.d]#
[root@node1 rules.d]# fdisk -l | grep sd
[root@node1 rules.d]# vi 99-oracle-asmdevices.rules
[root@node1 rules.d]# cat 99-oracle-asmdevices.rules
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45004932325a59732d757266762d4f486c59", NAME="asm-diskc", OWNER="oragrid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45007469355367682d513879662d6d344836", NAME="asm-diske", OWNER="oragrid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500764e577843332d547243472d37417072", NAME="asm-diskg", OWNER="oragrid", GROUP="asmadmin", MODE="0660"
[root@node1 rules.d]#
[root@node1 rules.d]#
[root@node1 rules.d]#
[root@node1 rules.d]#
[root@node1 rules.d]# start_udev
Starting udev: [  OK  ]
[root@node1 rules.d]# ls -l /dev/asm-*
brw-rw---- 1 oragrid asmadmin 8,  48 Oct  5 18:44 /dev/asm-diskc
brw-rw---- 1 oragrid asmadmin 8,  64 Oct  5 18:44 /dev/asm-diske
brw-rw---- 1 oragrid asmadmin 8, 112 Oct  5 18:44 /dev/asm-diskg
[root@node1 rules.d]# scp 99-oracle-asmdevices.rules node2:/etc/udev/rules.d/
root@node2's password:
99-oracle-asmdevices.rules 100% 705 0.7KB/s 00:00
[root@node1 rules.d]# ssh node2
root@node2's password:
Last login: Wed Oct 5 18:38:07 2016 from node1
[root@node2 ~]# start_udev
Starting udev: [  OK  ]
[root@node2 ~]# ls -l /dev/asm-*
brw-rw---- 1 oragrid asmadmin 8,  48 Oct  5 18:44 /dev/asm-diskc
brw-rw---- 1 oragrid asmadmin 8,  80 Oct  5 18:44 /dev/asm-diske
brw-rw---- 1 oragrid asmadmin 8, 112 Oct  5 18:44 /dev/asm-diskg
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]# su - oragrid
[oragrid@node2 ~]$ cd $ORACLE_HOME/log
[oragrid@node2 log]$ ls
crs diag node2
[oragrid@node2 log]$ cd node2
[oragrid@node2 node2]$ ls
acfs acfslog acfsrepl acfsreplroot acfssec admin agent alertnode2.log client crflogd crfmond crsd cssd ctssd cvu diskmon evmd gipcd gnsd gpnpd mdnsd ohasd racg srvm
[oragrid@node2 node2]$ tail -300 alertnode2.log
2016-07-17 00:11:12.818:
[client(6826)]CRS-2101:The OLR was formatted using version 3.
2016-07-17 00:11:45.527:
[ohasd(7046)]CRS-2112:The OLR service started on node node2.
2016-07-17 00:11:45.536:
[ohasd(7046)]CRS-1301:Oracle High Availability Service started on node node2.
[client(7245)]CRS-10001:17-Jul-16 00:11 ACFS-9200: Supported
[client(17547)]CRS-10001:17-Jul-16 00:12 ACFS-9300: ADVM/ACFS distribution files found.
[client(17553)]CRS-10001:17-Jul-16 00:12 ACFS-9307: Installing requested ADVM/ACFS software.
[client(17586)]CRS-10001:17-Jul-16 00:12 ACFS-9308: Loading installed ADVM/ACFS drivers.
[client(17588)]CRS-10001:17-Jul-16 00:12 ACFS-9321: Creating udev for ADVM/ACFS.
[client(17590)]CRS-10001:17-Jul-16 00:12 ACFS-9323: Creating module dependencies - this may take some time.
[client(27760)]CRS-10001:17-Jul-16 00:13 ACFS-9154: Loading 'oracleoks.ko' driver.
[client(27771)]CRS-10001:17-Jul-16 00:13 ACFS-9154: Loading 'oracleadvm.ko' driver.
[client(27797)]CRS-10001:17-Jul-16 00:13 ACFS-9154: Loading 'oracleacfs.ko' driver.
[client(27886)]CRS-10001:17-Jul-16 00:13 ACFS-9327: Verifying ADVM/ACFS devices.
[client(27894)]CRS-10001:17-Jul-16 00:13 ACFS-9156: Detecting control device '/dev/asm/.asm_ctl_spec'.
[client(27898)]CRS-10001:17-Jul-16 00:13 ACFS-9156: Detecting control device '/dev/ofsctl'.
[client(27903)]CRS-10001:17-Jul-16 00:13 ACFS-9309: ADVM/ACFS installation correctness verified.
2016-07-17 00:13:24.628:
[gpnpd(28019)]CRS-2328:GPNPD started on node node2.
2016-07-17 00:13:26.868:
[cssd(28076)]CRS-1713:CSSD daemon is started in exclusive mode
2016-07-17 00:13:28.745:
[ohasd(7046)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-07-17 00:13:28.746:
[ohasd(7046)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-07-17 00:14:03.533:
[cssd(28076)]CRS-1707:Lease acquisition for node node2 number 2 completed
[cssd(28076)]CRS-1636:The CSS daemon was started in exclusive mode but found an active CSS daemon on node node1 and is terminating; details at (:CSSNM00006:) in /grid/grid_home/log/node2/cssd/ocssd.log
2016-07-17 00:14:03.674:
[ohasd(7046)]CRS-2765:Resource 'ora.cssdmonitor' has failed on server 'node2'.
2016-07-17 00:14:05.403:
[gpnpd(28019)]CRS-2329:GPNPD on node node2 shutdown.
2016-07-17 00:14:06.763:
[mdnsd(28008)]CRS-5602:mDNS service stopping by request.
2016-07-17 00:14:22.904:
[gpnpd(28617)]CRS-2328:GPNPD started on node node2.
2016-07-17 00:14:25.512:
[cssd(28680)]CRS-1713:CSSD daemon is started in clustered mode
2016-07-17 00:14:27.378:
[ohasd(7046)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-07-17 00:14:27.379:
[ohasd(7046)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-07-17 00:14:37.096:
[cssd(28680)]CRS-1707:Lease acquisition for node node2 number 2 completed
2016-07-17 00:14:38.356:
[cssd(28680)]CRS-1605:CSSD voting file is online: /dev/mapper/vote_ocr; details in /grid/grid_home/log/node2/cssd/ocssd.log.
2016-07-17 00:14:40.537:
[cssd(28680)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 node2 .
2016-07-17 00:14:42.952:
[ctssd(28871)]CRS-2407:The new Cluster Time Synchronization Service reference node is host node1.
2016-07-17 00:14:42.951:
[ctssd(28871)]CRS-2401:The Cluster Time Synchronization Service started on host node2.
2016-07-17 00:14:48.211:
[ctssd(28871)]CRS-2408:The clock on host node2 has been updated by the Cluster Time Synchronization Service to be synchronous with the mean cluster time.
[client(28938)]CRS-10001:17-Jul-16 00:14 ACFS-9391: Checking for existing ADVM/ACFS installation.
[client(28943)]CRS-10001:17-Jul-16 00:14 ACFS-9392: Validating ADVM/ACFS installation files for operating system.
[client(28945)]CRS-10001:17-Jul-16 00:14 ACFS-9393: Verifying ASM Administrator setup.
[client(28948)]CRS-10001:17-Jul-16 00:14 ACFS-9308: Loading installed ADVM/ACFS drivers.
[client(28951)]CRS-10001:17-Jul-16 00:14 ACFS-9327: Verifying ADVM/ACFS devices.
[client(28953)]CRS-10001:17-Jul-16 00:14 ACFS-9156: Detecting control device '/dev/asm/.asm_ctl_spec'.
[client(28957)]CRS-10001:17-Jul-16 00:14 ACFS-9156: Detecting control device '/dev/ofsctl'.
[client(28962)]CRS-10001:17-Jul-16 00:14 ACFS-9322: completed
2016-07-17 00:15:00.705:
[/grid/grid_home/bin/orarootagent.bin(28860)]CRS-5018:CLSN00037 Removed unused HAIP route: 169.254.0.0 / 255.255.0.0 / 0.0.0.0 / bond0
2016-07-17 00:15:15.805:
[crsd(29151)]CRS-1012:The OCR service started on node node2.
2016-07-17 00:15:15.815:
[evmd(29167)]CRS-1401:EVMD started on node node2.
2016-07-17 00:15:17.201:
[crsd(29151)]CRS-1201:CRSD started on node node2.
2016-07-17 00:58:43.472:
[ctssd(28871)]CRS-2408:The clock on host node2 has been updated by the Cluster Time Synchronization Service to be synchronous with the mean cluster time.
2016-07-17 01:34:43.983:
[ctssd(28871)]CRS-2408:The clock on host node2 has been updated by the Cluster Time Synchronization Service to be synchronous with the mean cluster time.
2016-07-17 01:35:53.526:
[/grid/grid_home/bin/orarootagent.bin(29291)]CRS-5822:Agent '/grid/grid_home/bin/orarootagent_root' disconnected from server. Details at (:CRSAGF00117:) {0:3:125} in /grid/grid_home/log/node2/agent/crsd/orarootagent_root/orarootagent_root.log.
2016-07-17 01:35:54.444:
[ctssd(28871)]CRS-2405:The Cluster Time Synchronization Service on host node2 is shutdown by user
2016-07-17 01:36:04.751:
[cssd(28680)]CRS-1603:CSSD on node node2 shutdown by user.
2016-07-17 01:36:04.857:
[ohasd(7046)]CRS-2767:Resource state recovery not attempted for 'ora.cssdmonitor' as its target state is OFFLINE
2016-07-17 01:36:04.857:
[ohasd(7046)]CRS-2769:Unable to failover resource 'ora.cssdmonitor'.
2016-07-17 01:36:04.954:
[cssd(28680)]CRS-1660:The CSS daemon shutdown has completed
2016-07-17 01:36:06.154:
[ohasd(7046)]CRS-2767:Resource state recovery not attempted for 'ora.cssd' as its target state is OFFLINE
2016-07-17 01:36:06.154:
[ohasd(7046)]CRS-2769:Unable to failover resource 'ora.cssd'.
2016-07-17 01:37:08.545:
[mdnsd(28604)]CRS-5602:mDNS service stopping by request.
2016-07-17 01:37:11.266:
[gpnpd(28617)]CRS-2329:GPNPD on node node2 shutdown.
2016-10-05 18:26:23.110:
[ohasd(2661)]CRS-2112:The OLR service started on node node2.
2016-10-05 18:26:23.147:
[ohasd(2661)]CRS-1301:Oracle High Availability Service started on node node2.
2016-10-05 18:26:23.170:
[ohasd(2661)]CRS-8017:location: /etc/oracle/lastgasp has 2 reboot advisory log files, 0 were announced and 0 errors occurred
2016-10-05 18:26:27.485:
[/grid/grid_home/bin/orarootagent.bin(2900)]CRS-2302:Cannot get GPnP profile. Error CLSGPNP_NO_DAEMON (GPNPD daemon is not running).
2016-10-05 18:26:31.893:
[gpnpd(2994)]CRS-2328:GPNPD started on node node2.
2016-10-05 18:26:35.350:
[cssd(3061)]CRS-1713:CSSD daemon is started in clustered mode
2016-10-05 18:26:36.967:
[ohasd(2661)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-10-05 18:26:36.968:
[ohasd(2661)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-10-05 18:26:45.353:
[cssd(3061)]CRS-1707:Lease acquisition for node node2 number 2 completed
2016-10-05 18:26:46.618:
[cssd(3061)]CRS-1605:CSSD voting file is online: /dev/mapper/vote_ocr; details in /grid/grid_home/log/node2/cssd/ocssd.log.
2016-10-05 18:26:56.042:
[cssd(3061)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 node2 .
2016-10-05 18:26:58.581:
[ctssd(3263)]CRS-2401:The Cluster Time Synchronization Service started on host node2.
2016-10-05 18:26:58.581:
[ctssd(3263)]CRS-2407:The new Cluster Time Synchronization Service reference node is host node1.
2016-10-05 18:27:03.284:
[ctssd(3263)]CRS-2408:The clock on host node2 has been updated by the Cluster Time Synchronization Service to be synchronous with the mean cluster time.
[client(3301)]CRS-10001:05-Oct-16 18:27 ACFS-9391: Checking for existing ADVM/ACFS installation.
[client(3306)]CRS-10001:05-Oct-16 18:27 ACFS-9392: Validating ADVM/ACFS installation files for operating system.
[client(3308)]CRS-10001:05-Oct-16 18:27 ACFS-9393: Verifying ASM Administrator setup.
[client(3311)]CRS-10001:05-Oct-16 18:27 ACFS-9308: Loading installed ADVM/ACFS drivers.
[client(3314)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleoks.ko' driver.
[client(3324)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleadvm.ko' driver.
[client(3361)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleacfs.ko' driver.
[client(3437)]CRS-10001:05-Oct-16 18:27 ACFS-9327: Verifying ADVM/ACFS devices.
[client(3445)]CRS-10001:05-Oct-16 18:27 ACFS-9156: Detecting control device '/dev/asm/.asm_ctl_spec'.
[client(3449)]CRS-10001:05-Oct-16 18:27 ACFS-9156: Detecting control device '/dev/ofsctl'.
[client(3454)]CRS-10001:05-Oct-16 18:27 ACFS-9322: completed
2016-10-05 18:27:36.366:
[crsd(3644)]CRS-1012:The OCR service started on node node2.
2016-10-05 18:27:36.428:
[evmd(3284)]CRS-1401:EVMD started on node node2.
2016-10-05 18:27:38.397:
[crsd(3644)]CRS-1201:CRSD started on node node2.
[oragrid@node2 node2]$ exit
logout
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]# exit
logout
Connection to node2 closed.
[root@node1 rules.d]# su - oragrid
[oragrid@node1 ~]$ cd $ORACLE_HOME/log
[oragrid@node1 log]$ ls
crs diag node1
[oragrid@node1 log]$ cd node1
[oragrid@node1 node1]$ ls
acfs acfslog acfsrepl acfsreplroot acfssec admin agent alertnode1.log client crflogd crfmond crsd cssd ctssd cvu diskmon evmd gipcd gnsd gpnpd mdnsd ohasd racg srvm
[oragrid@node1 node1]$ tail -f alertnode1.log
2016-10-05 18:27:44.520:
[crsd(3698)]CRS-2772:Server 'node1' has been assigned to pool 'Generic'.
2016-10-05 18:27:44.531:
[crsd(3698)]CRS-2772:Server 'node1' has been assigned to pool 'ora.orcc'.
2016-10-05 18:27:45.125:
[client(4012)]CRS-4743:File /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml was updated from OCR(Size: 13365(New), 13378(Old) bytes)
2016-10-05 18:28:01.879:
[crsd(3698)]CRS-2772:Server 'node2' has been assigned to pool 'Generic'.
2016-10-05 18:28:01.880:
[crsd(3698)]CRS-2772:Server 'node2' has been assigned to pool 'ora.orcc'.
^C
[oragrid@node1 node1]$ tail -200f alertnode1.log
[gpnpd(31061)]CRS-2328:GPNPD started on node node1.
2016-07-17 00:05:07.025:
[cssd(31119)]CRS-1713:CSSD daemon is started in exclusive mode
2016-07-17 00:05:08.917:
[ohasd(10088)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-07-17 00:05:08.917:
[ohasd(10088)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-07-17 00:05:13.559:
[cssd(31119)]CRS-1709:Lease acquisition failed for node node1 because no voting file has been configured; Details at (:CSSNM00031:) in /grid/grid_home/log/node1/cssd/ocssd.log
2016-07-17 00:05:22.325:
[cssd(31119)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 .
2016-07-17 00:05:24.846:
[ctssd(31170)]CRS-2401:The Cluster Time Synchronization Service started on host node1.
2016-07-17 00:05:24.846:
[ctssd(31170)]CRS-2407:The new Cluster Time Synchronization Service reference node is host node1.
[client(31231)]CRS-10001:17-Jul-16 00:05 ACFS-9203: true
[client(31381)]CRS-10001:17-Jul-16 00:05 ACFS-9391: Checking for existing ADVM/ACFS installation.
[client(31386)]CRS-10001:17-Jul-16 00:05 ACFS-9392: Validating ADVM/ACFS installation files for operating system.
[client(31388)]CRS-10001:17-Jul-16 00:05 ACFS-9393: Verifying ASM Administrator setup.
[client(31391)]CRS-10001:17-Jul-16 00:05 ACFS-9308: Loading installed ADVM/ACFS drivers.
[client(31400)]CRS-10001:17-Jul-16 00:05 ACFS-9327: Verifying ADVM/ACFS devices.
[client(31404)]CRS-10001:17-Jul-16 00:05 ACFS-9156: Detecting control device '/dev/asm/.asm_ctl_spec'.
[client(31408)]CRS-10001:17-Jul-16 00:05 ACFS-9156: Detecting control device '/dev/ofsctl'.
[client(31417)]CRS-10001:17-Jul-16 00:05 ACFS-9322: completed
2016-07-17 00:05:46.497:
[/grid/grid_home/bin/orarootagent.bin(31159)]CRS-5018:CLSN00037 Removed unused HAIP route: 169.254.0.0 / 255.255.0.0 / 0.0.0.0 / bond0
2016-07-17 00:05:50.451:
[client(31700)]CRS-1001:The OCR was formatted using version 3.
2016-07-17 00:05:52.046:
[crsd(31813)]CRS-1012:The OCR service started on node node1.
2016-07-17 00:05:53.777:
[cssd(31119)]CRS-1605:CSSD voting file is online: /dev/mapper/vote_ocr; details in /grid/grid_home/log/node1/cssd/ocssd.log.
2016-07-17 00:05:53.777:
[cssd(31119)]CRS-1626:A Configuration change request completed successfully
2016-07-17 00:05:53.782:
[cssd(31119)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 .
2016-07-17 00:06:06.810:
[ctssd(31170)]CRS-2405:The Cluster Time Synchronization Service on host node1 is shutdown by user
2016-07-17 00:06:18.913:
[cssd(31119)]CRS-1603:CSSD on node node1 shutdown by user.
2016-07-17 00:06:19.119:
[cssd(31119)]CRS-1660:The CSS daemon shutdown has completed
2016-07-17 00:06:19.394:
[ohasd(10088)]CRS-2767:Resource state recovery not attempted for 'ora.cssdmonitor' as its target state is OFFLINE
2016-07-17 00:06:19.394:
[ohasd(10088)]CRS-2769:Unable to failover resource 'ora.cssdmonitor'.
2016-07-17 00:06:20.317:
[ohasd(10088)]CRS-2767:Resource state recovery not attempted for 'ora.cssd' as its target state is OFFLINE
2016-07-17 00:06:20.317:
[ohasd(10088)]CRS-2769:Unable to failover resource 'ora.cssd'.
2016-07-17 00:06:21.723:
[gpnpd(31061)]CRS-2329:GPNPD on node node1 shutdown.
2016-07-17 00:06:23.220:
[mdnsd(31050)]CRS-5602:mDNS service stopping by request.
2016-07-17 00:06:31.290:
[gpnpd(32363)]CRS-2328:GPNPD started on node node1.
2016-07-17 00:06:33.901:
[cssd(32415)]CRS-1713:CSSD daemon is started in clustered mode
2016-07-17 00:06:35.760:
[ohasd(10088)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-07-17 00:06:35.760:
[ohasd(10088)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-07-17 00:07:06.673:
[cssd(32415)]CRS-1707:Lease acquisition for node node1 number 1 completed
2016-07-17 00:07:07.934:
[cssd(32415)]CRS-1605:CSSD voting file is online: /dev/mapper/vote_ocr; details in /grid/grid_home/log/node1/cssd/ocssd.log.
2016-07-17 00:07:17.060:
[cssd(32415)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 .
2016-07-17 00:07:19.336:
[ctssd(32884)]CRS-2407:The new Cluster Time Synchronization Service reference node is host node1.
2016-07-17 00:07:19.335:
[ctssd(32884)]CRS-2401:The Cluster Time Synchronization Service started on host node1.
2016-07-17 00:07:41.530:
[crsd(33133)]CRS-1012:The OCR service started on node node1.
2016-07-17 00:07:42.028:
[evmd(33149)]CRS-1401:EVMD started on node node1.
2016-07-17 00:07:43.106:
[crsd(33133)]CRS-1201:CRSD started on node node1.
2016-07-17 00:07:43.454:
[crsd(33133)]CRS-2772:Server 'node1' has been assigned to pool 'Free'.
[client(33566)]CRS-10001:17-Jul-16 00:08 ACFS-9200: Supported
2016-07-17 00:08:26.456:
[client(33737)]CRS-4742:OCR updated with contents of /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml (New = 13378, Old = 0 bytes)
2016-07-17 00:08:26.545:
[client(33745)]CRS-4743:File /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml was updated from OCR(Size: 13377(New), 13378(Old) bytes)
2016-07-17 00:08:26.584:
[client(33749)]CRS-4743:File /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml was updated from OCR(Size: 13377(New), 13378(Old) bytes)
2016-07-17 00:08:26.652:
[client(33753)]CRS-4742:OCR updated with contents of /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml (New = 13378, Old = 13378 bytes)
2016-07-17 00:08:37.884:
[client(34067)]CRS-4743:File /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml was updated from OCR(Size: 13377(New), 13378(Old) bytes)
[client(34198)]CRS-10001:17-Jul-16 00:08 ACFS-9200: Supported
2016-07-17 00:14:40.763:
[cssd(32415)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 node2 .
2016-07-17 00:15:20.258:
[crsd(33133)]CRS-2772:Server 'node2' has been assigned to pool 'Free'.
[client(35283)]CRS-10001:17-Jul-16 00:17 ACFS-9203: true
[client(35670)]CRS-10001:17-Jul-16 00:17 ACFS-9200: Supported
[client(37160)]CRS-10001:17-Jul-16 00:17 ACFS-9200: Supported
[client(46431)]CRS-10001:17-Jul-16 00:46 ACFS-9203: true
[client(46778)]CRS-10001:17-Jul-16 00:46 ACFS-9203: true
2016-07-17 00:54:02.914:
[crsd(33133)]CRS-2773:Server 'node2' has been removed from pool 'Free'.
2016-07-17 00:54:02.914:
[crsd(33133)]CRS-2772:Server 'node2' has been assigned to pool 'Generic'.
2016-07-17 00:54:02.915:
[crsd(33133)]CRS-2773:Server 'node1' has been removed from pool 'Free'.
2016-07-17 00:54:02.915:
[crsd(33133)]CRS-2772:Server 'node1' has been assigned to pool 'Generic'.
2016-07-17 00:54:02.915:
[crsd(33133)]CRS-2772:Server 'node1' has been assigned to pool 'ora.orcc'.
2016-07-17 00:54:02.916:
[crsd(33133)]CRS-2772:Server 'node2' has been assigned to pool 'ora.orcc'.
2016-07-17 01:35:55.522:
[ctssd(32884)]CRS-2405:The Cluster Time Synchronization Service on host node1 is shutdown by user
2016-07-17 01:35:55.531:
[/grid/grid_home/bin/orarootagent.bin(33837)]CRS-5822:Agent '/grid/grid_home/bin/orarootagent_root' disconnected from server. Details at (:CRSAGF00117:) {0:3:32} in /grid/grid_home/log/node1/agent/crsd/orarootagent_root/orarootagent_root.log.
2016-07-17 01:36:04.852:
[cssd(32415)]CRS-1625:Node node2, number 2, was manually shut down
2016-07-17 01:36:04.854:
[cssd(32415)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 .
2016-07-17 01:36:06.742:
[cssd(32415)]CRS-1603:CSSD on node node1 shutdown by user.
2016-07-17 01:36:06.847:
[ohasd(10088)]CRS-2767:Resource state recovery not attempted for 'ora.cssdmonitor' as its target state is OFFLINE
2016-07-17 01:36:06.847:
[ohasd(10088)]CRS-2769:Unable to failover resource 'ora.cssdmonitor'.
2016-07-17 01:36:06.945:
[cssd(32415)]CRS-1660:The CSS daemon shutdown has completed
2016-07-17 01:36:08.135:
[ohasd(10088)]CRS-2767:Resource state recovery not attempted for 'ora.cssd' as its target state is OFFLINE
2016-07-17 01:36:08.135:
[ohasd(10088)]CRS-2769:Unable to failover resource 'ora.cssd'.
2016-07-17 01:36:10.675:
[client(61045)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:20.399:
[client(61176)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:30.495:
[client(61256)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:40.782:
[client(61354)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:50.208:
[client(61375)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:37:03.264:
[mdnsd(32350)]CRS-5602:mDNS service stopping by request.
2016-07-17 01:37:06.085:
[gpnpd(32363)]CRS-2329:GPNPD on node node1 shutdown.
2016-10-05 18:26:19.904:
[ohasd(2704)]CRS-2112:The OLR service started on node node1.
2016-10-05 18:26:20.192:
[ohasd(2704)]CRS-1301:Oracle High Availability Service started on node node1.
2016-10-05 18:26:20.295:
[ohasd(2704)]CRS-8017:location: /etc/oracle/lastgasp has 2 reboot advisory log files, 0 were announced and 0 errors occurred
2016-10-05 18:26:28.827:
[/grid/grid_home/bin/orarootagent.bin(2939)]CRS-2302:Cannot get GPnP profile. Error CLSGPNP_NO_DAEMON (GPNPD daemon is not running).
2016-10-05 18:26:33.095:
[gpnpd(3040)]CRS-2328:GPNPD started on node node1.
2016-10-05 18:26:35.882:
[cssd(3108)]CRS-1713:CSSD daemon is started in clustered mode
2016-10-05 18:26:37.461:
[ohasd(2704)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-10-05 18:26:37.494:
[ohasd(2704)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-10-05 18:26:45.859:
[cssd(3108)]CRS-1707:Lease acquisition for node node1 number 1 completed
2016-10-05 18:26:47.144:
[cssd(3108)]CRS-1605:CSSD voting file is online: /dev/mapper/vote_ocr; details in /grid/grid_home/log/node1/cssd/ocssd.log.
2016-10-05 18:26:55.712:
[cssd(3108)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 node2 .
2016-10-05 18:26:58.047:
[ctssd(3314)]CRS-2407:The new Cluster Time Synchronization Service reference node is host node1.
2016-10-05 18:26:58.047:
[ctssd(3314)]CRS-2401:The Cluster Time Synchronization Service started on host node1.
[client(3353)]CRS-10001:05-Oct-16 18:27 ACFS-9391: Checking for existing ADVM/ACFS installation.
[client(3358)]CRS-10001:05-Oct-16 18:27 ACFS-9392: Validating ADVM/ACFS installation files for operating system.
[client(3360)]CRS-10001:05-Oct-16 18:27 ACFS-9393: Verifying ASM Administrator setup.
[client(3363)]CRS-10001:05-Oct-16 18:27 ACFS-9308: Loading installed ADVM/ACFS drivers.
[client(3366)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleoks.ko' driver.
[client(3376)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleadvm.ko' driver.
[client(3398)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleacfs.ko' driver.
[client(3490)]CRS-10001:05-Oct-16 18:27 ACFS-9327: Verifying ADVM/ACFS devices.
[client(3498)]CRS-10001:05-Oct-16 18:27 ACFS-9156: Detecting control device '/dev/asm/.asm_ctl_spec'.
[client(3502)]CRS-10001:05-Oct-16 18:27 ACFS-9156: Detecting control device '/dev/ofsctl'.
[client(3509)]CRS-10001:05-Oct-16 18:27 ACFS-9322: completed
2016-10-05 18:27:31.947:
[crsd(3698)]CRS-1012:The OCR service started on node node1.
2016-10-05 18:27:31.978:
[evmd(3335)]CRS-1401:EVMD started on node node1.
2016-10-05 18:27:36.399:
[crsd(3698)]CRS-1201:CRSD started on node node1.
2016-10-05 18:27:44.520:
[crsd(3698)]CRS-2772:Server 'node1' has been assigned to pool 'Generic'.
2016-10-05 18:27:44.531:
[crsd(3698)]CRS-2772:Server 'node1' has been assigned to pool 'ora.orcc'.
2016-10-05 18:27:45.125:
[client(4012)]CRS-4743:File /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml was updated from OCR(Size: 13365(New), 13378(Old) bytes)
2016-10-05 18:28:01.879:
[crsd(3698)]CRS-2772:Server 'node2' has been assigned to pool 'Generic'.
2016-10-05 18:28:01.880:
[crsd(3698)]CRS-2772:Server 'node2' has been assigned to pool 'ora.orcc'.
^C
[oragrid@node1 node1]$ ls -ltr
total 108
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:00 srvm
drwxr-x--- 2 root oinstall 4096 Jul 17 00:00 gnsd
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:00 diskmon
drwxr-x--- 4 oragrid oinstall 4096 Jul 17 00:00 cvu
drwxr-xr-x 2 root oinstall 4096 Jul 17 00:00 acfssec
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:00 acfsrepl
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:00 acfslog
drwxrwxr-t 4 root oinstall 4096 Jul 17 00:00 agent
drwxrwxr-t 5 oragrid oinstall 4096 Jul 17 00:00 racg
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:00 admin
drwxr-x--- 2 root oinstall 4096 Jul 17 00:00 acfsreplroot
drwxr-x--- 2 root oinstall 4096 Jul 17 00:03 ohasd
drwxr-xr-x 3 root root 4096 Jul 17 00:05 acfs
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:05 mdnsd
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:05 gipcd
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:05 cssd
drwxr-x--- 2 root oinstall 4096 Jul 17 00:05 ctssd
drwxr-x--- 2 root oinstall 4096 Jul 17 00:05 crsd
drwxr-x--- 2 root oinstall 4096 Jul 17 00:07 crfmond
drwxr-x--- 2 root oinstall 4096 Jul 17 00:07 crflogd
drwxr-x--- 2 oragrid oinstall 4096 Jul 17 00:07 evmd
drwxr-x--- 2 oragrid oinstall 4096 Oct  5 18:26 gpnpd
-rw-rw-r-- 1 oragrid oinstall 13847 Oct  5 18:28 alertnode1.log
drwxrwxrwt 2 oragrid oinstall 4096 Oct  5 18:32 client
[oragrid@node1 node1]$ cd cssd
[oragrid@node1 cssd]$ ls
cssdOUT.log ocssd.log
[oragrid@node1 cssd]$ tail -100f ocssd.log
2016-10-05 18:45:49.707: [ CSSD][4088366848]clssnmSendingThread: sending status msg to all nodes
2016-10-05 18:45:49.707: [ CSSD][4088366848]clssnmSendingThread: sent 5 status msgs to all nodes
2016-10-05 18:45:53.710: [ CSSD][4088366848]clssnmSendingThread: sending status msg to all nodes
2016-10-05 18:45:53.710: [ CSSD][4088366848]clssnmSendingThread: sent 4 status msgs to all nodes
2016-10-05 18:45:58.714: [ CSSD][4088366848]clssnmSendingThread: sending status msg to all nodes
2016-10-05 18:45:58.714: [ CSSD][4088366848]clssnmSendingThread: sent 5 status msgs to all nodes
2016-10-05 18:46:03.721: [ CSSD][4088366848]clssnmSendingThread: sending status msg to all nodes
2016-10-05 18:46:03.721: [ CSSD][4088366848]clssnmSendingThread: sent 5 status msgs to all nodes
2016-10-05 18:46:07.724: [ CSSD][4088366848]clssnmSendingThread: sending status msg to all nodes
2016-10-05 18:46:07.724: [ CSSD][4088366848]clssnmSendingThread: sent 4 status msgs to all nodes
2016-10-05 18:46:12.727: [ CSSD][4088366848]clssnmSendingThread: sending status msg to all nodes
2016-10-05 18:46:12.727: [ CSSD][4088366848]clssnmSendingThread: sent 5 status msgs to all nodes
2016-10-05 18:46:17.731: [ CSSD][4088366848]clssnmSendingThread: sending status msg to all nodes
2016-10-05 18:46:17.731: [ CSSD][4088366848]clssnmSendingThread: sent 5 status msgs to all nodes
^C
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$ su
Password:
[root@node1 cssd]# cd $ORACLE_HOME/bin
[root@node1 bin]# ./crsctl check cluster -all
**************************************************************
node1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[root@node1 bin]# ./crsctl stop cluster -all
CRS-2673: Attempting to stop 'ora.crsd' on 'node2'
CRS-2673: Attempting to stop 'ora.crsd' on 'node1'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'node1'
CRS-2673: Attempting to stop 'ora.LISTENER.lsnr' on 'node1'
CRS-2673: Attempting to stop 'ora.cvu' on 'node1'
CRS-2673: Attempting to stop 'ora.OCRVOTE.dg' on 'node1'
CRS-2673: Attempting to stop 'ora.registry.acfs' on 'node1'
CRS-2673: Attempting to stop 'ora.orcc.db' on 'node1'
CRS-2673: Attempting to stop 'ora.oc4j' on 'node1'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'node2'
CRS-2673: Attempting to stop 'ora.OCRVOTE.dg' on 'node2'
CRS-2673: Attempting to stop 'ora.registry.acfs' on 'node2'
CRS-2673: Attempting to stop 'ora.orcc.db' on 'node2'
CRS-2673: Attempting to stop 'ora.LISTENER.lsnr' on 'node2'
CRS-2673: Attempting to stop 'ora.LISTENER_SCAN1.lsnr' on 'node2'
CRS-2677: Stop of 'ora.cvu' on 'node1' succeeded
CRS-2677: Stop of 'ora.LISTENER.lsnr' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.node1.vip' on 'node1'
CRS-2677: Stop of 'ora.LISTENER.lsnr' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.node2.vip' on 'node2'
CRS-2677: Stop of 'ora.LISTENER_SCAN1.lsnr' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.scan1.vip' on 'node2'
CRS-2677: Stop of 'ora.registry.acfs' on 'node1' succeeded
CRS-2677: Stop of 'ora.registry.acfs' on 'node2' succeeded
CRS-2677: Stop of 'ora.orcc.db' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.ARCH.dg' on 'node1'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'node1'
CRS-2677: Stop of 'ora.node1.vip' on 'node1' succeeded
CRS-2677: Stop of 'ora.ARCH.dg' on 'node1' succeeded
CRS-2677: Stop of 'ora.node2.vip' on 'node2' succeeded
CRS-2677: Stop of 'ora.DATA.dg' on 'node1' succeeded
CRS-2677: Stop of 'ora.scan1.vip' on 'node2' succeeded
CRS-2677: Stop of 'ora.orcc.db' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.ARCH.dg' on 'node2'
CRS-2673: Attempting to stop 'ora.DATA.dg' on 'node2'
CRS-2677: Stop of 'ora.DATA.dg' on 'node2' succeeded
CRS-2677: Stop of 'ora.ARCH.dg' on 'node2' succeeded
CRS-2677: Stop of 'ora.oc4j' on 'node1' succeeded
CRS-2677: Stop of 'ora.OCRVOTE.dg' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'node1'
CRS-2677: Stop of 'ora.OCRVOTE.dg' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'node2'
CRS-2677: Stop of 'ora.asm' on 'node1' succeeded
CRS-2677: Stop of 'ora.asm' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.ons' on 'node2'
CRS-2677: Stop of 'ora.ons' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.net1.network' on 'node2'
CRS-2673: Attempting to stop 'ora.ons' on 'node1'
CRS-2677: Stop of 'ora.net1.network' on 'node2' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'node2' has completed
CRS-2677: Stop of 'ora.crsd' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'node2'
CRS-2673: Attempting to stop 'ora.evmd' on 'node2'
CRS-2673: Attempting to stop 'ora.asm' on 'node2'
CRS-2677: Stop of 'ora.ons' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.net1.network' on 'node1'
CRS-2677: Stop of 'ora.net1.network' on 'node1' succeeded
CRS-2792: Shutdown of Cluster Ready Services-managed resources on 'node1' has completed
CRS-2677: Stop of 'ora.crsd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'node1'
CRS-2673: Attempting to stop 'ora.evmd' on 'node1'
CRS-2673: Attempting to stop 'ora.asm' on 'node1'
CRS-2677: Stop of 'ora.evmd' on 'node2' succeeded
CRS-2677: Stop of 'ora.evmd' on 'node1' succeeded
CRS-2677: Stop of 'ora.ctssd' on 'node2' succeeded
CRS-2677: Stop of 'ora.asm' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'node2'
CRS-2677: Stop of 'ora.ctssd' on 'node1' succeeded
CRS-2677: Stop of 'ora.asm' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'node1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'node2' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'node2'
CRS-2677: Stop of 'ora.cssd' on 'node2' succeeded
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'node1'
CRS-2677: Stop of 'ora.cssd' on 'node1' succeeded
[root@node1 bin]# ps -ef | grep d.bin
root 2704 1 0 18:26 ? 00:00:11 /grid/grid_home/bin/ohasd.bin reboot
oragrid 3030 1 0 18:26 ? 00:00:00 /grid/grid_home/bin/mdnsd.bin
oragrid 3040 1 0 18:26 ? 00:00:01 /grid/grid_home/bin/gpnpd.bin
oragrid 3053 1 0 18:26 ? 00:00:07 /grid/grid_home/bin/gipcd.bin
root 3065 1 2 18:26 ? 00:00:35 /grid/grid_home/bin/osysmond.bin
root 7390 6931 0 18:47 pts/0 00:00:00 grep d.bin
[root@node1 bin]#
[root@node1 bin]#
[root@node1 bin]#
[root@node1 bin]#
[root@node1 bin]# crsctl start cluster -all
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'node1'
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'node2'
CRS-2676: Start of 'ora.cssdmonitor' on 'node2' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'node2'
CRS-2676: Start of 'ora.cssdmonitor' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.diskmon' on 'node2'
CRS-2672: Attempting to start 'ora.cssd' on 'node1'
CRS-2672: Attempting to start 'ora.diskmon' on 'node1'
CRS-2676: Start of 'ora.diskmon' on 'node1' succeeded
CRS-2676: Start of 'ora.diskmon' on 'node2' succeeded
^C
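Note: the start hangs after ora.diskmon because ora.cssd cannot come online; while it hangs, the lower-stack state can be watched with, e.g.:
./crsctl stat res -t -init      # from the Grid home bin directory, as root
The alert log and ocssd.log below show why cssd is stuck.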
[root@node1 bin]# cd $ORACLE_HOME/log/node1
[root@node1 node1]# ls
acfs acfslog acfsrepl acfsreplroot acfssec admin agent alertnode1.log client crflogd crfmond crsd cssd ctssd cvu diskmon evmd gipcd gnsd gpnpd mdnsd ohasd racg srvm
[root@node1 node1]# tail -100f alertnode1.log
[cssd(32415)]CRS-1660:The CSS daemon shutdown has completed
2016-07-17 01:36:08.135:
[ohasd(10088)]CRS-2767:Resource state recovery not attempted for 'ora.cssd' as its target state is OFFLINE
2016-07-17 01:36:08.135:
[ohasd(10088)]CRS-2769:Unable to failover resource 'ora.cssd'.
2016-07-17 01:36:10.675:
[client(61045)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:20.399:
[client(61176)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:30.495:
[client(61256)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:40.782:
[client(61354)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:36:50.208:
[client(61375)]CRS-1013:The OCR location in an ASM disk group is inaccessible. Details in /grid/grid_home/log/node1/client/crsctl_orardbms.log.
2016-07-17 01:37:03.264:
[mdnsd(32350)]CRS-5602:mDNS service stopping by request.
2016-07-17 01:37:06.085:
[gpnpd(32363)]CRS-2329:GPNPD on node node1 shutdown.
2016-10-05 18:26:19.904:
[ohasd(2704)]CRS-2112:The OLR service started on node node1.
2016-10-05 18:26:20.192:
[ohasd(2704)]CRS-1301:Oracle High Availability Service started on node node1.
2016-10-05 18:26:20.295:
[ohasd(2704)]CRS-8017:location: /etc/oracle/lastgasp has 2 reboot advisory log files, 0 were announced and 0 errors occurred
2016-10-05 18:26:28.827:
[/grid/grid_home/bin/orarootagent.bin(2939)]CRS-2302:Cannot get GPnP profile. Error CLSGPNP_NO_DAEMON (GPNPD daemon is not running).
2016-10-05 18:26:33.095:
[gpnpd(3040)]CRS-2328:GPNPD started on node node1.
2016-10-05 18:26:35.882:
[cssd(3108)]CRS-1713:CSSD daemon is started in clustered mode
2016-10-05 18:26:37.461:
[ohasd(2704)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-10-05 18:26:37.494:
[ohasd(2704)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-10-05 18:26:45.859:
[cssd(3108)]CRS-1707:Lease acquisition for node node1 number 1 completed
2016-10-05 18:26:47.144:
[cssd(3108)]CRS-1605:CSSD voting file is online: /dev/mapper/vote_ocr; details in /grid/grid_home/log/node1/cssd/ocssd.log.
2016-10-05 18:26:55.712:
[cssd(3108)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 node2 .
2016-10-05 18:26:58.047:
[ctssd(3314)]CRS-2407:The new Cluster Time Synchronization Service reference node is host node1.
2016-10-05 18:26:58.047:
[ctssd(3314)]CRS-2401:The Cluster Time Synchronization Service started on host node1.
[client(3353)]CRS-10001:05-Oct-16 18:27 ACFS-9391: Checking for existing ADVM/ACFS installation.
[client(3358)]CRS-10001:05-Oct-16 18:27 ACFS-9392: Validating ADVM/ACFS installation files for operating system.
[client(3360)]CRS-10001:05-Oct-16 18:27 ACFS-9393: Verifying ASM Administrator setup.
[client(3363)]CRS-10001:05-Oct-16 18:27 ACFS-9308: Loading installed ADVM/ACFS drivers.
[client(3366)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleoks.ko' driver.
[client(3376)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleadvm.ko' driver.
[client(3398)]CRS-10001:05-Oct-16 18:27 ACFS-9154: Loading 'oracleacfs.ko' driver.
[client(3490)]CRS-10001:05-Oct-16 18:27 ACFS-9327: Verifying ADVM/ACFS devices.
[client(3498)]CRS-10001:05-Oct-16 18:27 ACFS-9156: Detecting control device '/dev/asm/.asm_ctl_spec'.
[client(3502)]CRS-10001:05-Oct-16 18:27 ACFS-9156: Detecting control device '/dev/ofsctl'.
[client(3509)]CRS-10001:05-Oct-16 18:27 ACFS-9322: completed
2016-10-05 18:27:31.947:
[crsd(3698)]CRS-1012:The OCR service started on node node1.
2016-10-05 18:27:31.978:
[evmd(3335)]CRS-1401:EVMD started on node node1.
2016-10-05 18:27:36.399:
[crsd(3698)]CRS-1201:CRSD started on node node1.
2016-10-05 18:27:44.520:
[crsd(3698)]CRS-2772:Server 'node1' has been assigned to pool 'Generic'.
2016-10-05 18:27:44.531:
[crsd(3698)]CRS-2772:Server 'node1' has been assigned to pool 'ora.orcc'.
2016-10-05 18:27:45.125:
[client(4012)]CRS-4743:File /grid/grid_home/oc4j/j2ee/home/OC4J_DBWLM_config/system-jazn-data.xml was updated from OCR(Size: 13365(New), 13378(Old) bytes)
2016-10-05 18:28:01.879:
[crsd(3698)]CRS-2772:Server 'node2' has been assigned to pool 'Generic'.
2016-10-05 18:28:01.880:
[crsd(3698)]CRS-2772:Server 'node2' has been assigned to pool 'ora.orcc'.
2016-10-05 18:46:58.408:
ent_root/orarootagent_root.log.
2016-10-05 18:46:59.354:
[ctssd(3314)]CRS-2405:The Cluster Time Synchronization Service on host node1 is shutdown by user
2016-10-05 18:47:08.480:
[cssd(3108)]CRS-1625:Node node2, number 2, was manually shut down
2016-10-05 18:47:08.486:
[cssd(3108)]CRS-1601:CSSD Reconfiguration complete. Active nodes are node1 .
2016-10-05 18:47:10.382:
[cssd(3108)]CRS-1603:CSSD on node node1 shutdown by user.
2016-10-05 18:47:10.499:
[ohasd(2704)]CRS-2767:Resource state recovery not attempted for 'ora.cssdmonitor' as its target state is OFFLINE
2016-10-05 18:47:10.499:
[ohasd(2704)]CRS-2769:Unable to failover resource 'ora.cssdmonitor'.
2016-10-05 18:47:55.311:
[cssd(7420)]CRS-1713:CSSD daemon is started in clustered mode
2016-10-05 18:47:55.399:
[cssd(7420)]CRS-1714:Unable to discover any voting files, retrying discovery in 15 seconds; Details at (:CSSNM00070:) in /grid/grid_home/log/node1/cssd/ocssd.log
2016-10-05 18:47:57.155:
[ohasd(2704)]CRS-2767:Resource state recovery not attempted for 'ora.diskmon' as its target state is OFFLINE
2016-10-05 18:47:57.156:
[ohasd(2704)]CRS-2769:Unable to failover resource 'ora.diskmon'.
2016-10-05 18:48:10.416:
[cssd(7420)]CRS-1714:Unable to discover any voting files, retrying discovery in 15 seconds; Details at (:CSSNM00070:) in /grid/grid_home/log/node1/cssd/ocssd.log
2016-10-05 18:48:25.429:
[cssd(7420)]CRS-1714:Unable to discover any voting files, retrying discovery in 15 seconds; Details at (:CSSNM00070:) in /grid/grid_home/log/node1/cssd/ocssd.log
2016-10-05 18:48:40.452:
[cssd(7420)]CRS-1714:Unable to discover any voting files, retrying discovery in 15 seconds; Details at (:CSSNM00070:) in /grid/grid_home/log/node1/cssd/ocssd.log
2016-10-05 18:48:55.467:
[cssd(7420)]CRS-1714:Unable to discover any voting files, retrying discovery in 15 seconds; Details at (:CSSNM00070:) in /grid/grid_home/log/node1/cssd/ocssd.log
^C
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# tail -200f /grid/grid_home/log/node1/cssd/ocssd.log
2016-10-05 18:49:23.020: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d48084620
2016-10-05 18:49:23.020: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d48084620
2016-10-05 18:49:23.020: [ CSSD][1360275200]clssgmRegisterClient: proc(4/0x7f3d48084620), client(86/0x7f3d480596c0)
2016-10-05 18:49:23.020: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d48084620) client(0x7f3d480596c0)
2016-10-05 18:49:23.020: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x16fe
2016-10-05 18:49:23.371: [ CSSD][1360275200]clssscSelect: cookie accept request 0xc7f8f0
2016-10-05 18:49:23.371: [ CSSD][1360275200]clssgmAllocProc: (0x7f3d480717d0) allocated
2016-10-05 18:49:23.372: [ CSSD][1360275200]clssgmClientConnectMsg: properties of cmProc 0x7f3d480717d0 - 1,2,3,4,5
2016-10-05 18:49:23.372: [ CSSD][1360275200]clssgmClientConnectMsg: Connect from con(0x1730) proc(0x7f3d480717d0) pid(3053) version 11:2:1:4, properties: 1,2,3,4,5
2016-10-05 18:49:23.372: [ CSSD][1360275200]clssgmClientConnectMsg: msg flags 0x0000
2016-10-05 18:49:23.857: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d4806ed00
2016-10-05 18:49:23.857: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d4806ed00
2016-10-05 18:49:23.857: [ CSSD][1360275200]clssgmRegisterClient: proc(3/0x7f3d4806ed00), client(87/0x7f3d48096b90)
2016-10-05 18:49:23.857: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d4806ed00) client(0x7f3d48096b90)
2016-10-05 18:49:23.857: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x1756
2016-10-05 18:49:24.022: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d48084620
2016-10-05 18:49:24.023: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d48084620
2016-10-05 18:49:24.023: [ CSSD][1360275200]clssgmRegisterClient: proc(4/0x7f3d48084620), client(87/0x7f3d48096b90)
2016-10-05 18:49:24.023: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d48084620) client(0x7f3d48096b90)
2016-10-05 18:49:24.023: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x176c
2016-10-05 18:49:24.860: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d4806ed00
2016-10-05 18:49:24.860: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d4806ed00
2016-10-05 18:49:24.860: [ CSSD][1360275200]clssgmRegisterClient: proc(3/0x7f3d4806ed00), client(88/0x7f3d48096b90)
2016-10-05 18:49:24.860: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d4806ed00) client(0x7f3d48096b90)
2016-10-05 18:49:24.860: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x1782
2016-10-05 18:49:25.025: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d48084620
2016-10-05 18:49:25.025: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d48084620
2016-10-05 18:49:25.026: [ CSSD][1360275200]clssgmRegisterClient: proc(4/0x7f3d48084620), client(88/0x7f3d480916b0)
2016-10-05 18:49:25.026: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d48084620) client(0x7f3d480916b0)
2016-10-05 18:49:25.026: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x1798
2016-10-05 18:49:25.486: [ GPNP][1357584128]clsgpnp_profileCallUrlInt: [at clsgpnp.c:2104] get-profile call to url "ipc://GPNPD_node1" disco "" [f=0 claimed- host: cname: seq: auth:]
2016-10-05 18:49:25.499: [ GPNP][1357584128]clsgpnp_profileCallUrlInt: [at clsgpnp.c:2234] Result: (0) CLSGPNP_OK. Successful get-profile CALL to remote "ipc://GPNPD_node1" disco ""
2016-10-05 18:49:25.500: [ CSSD][1357584128]clssnmReadDiscoveryProfile: voting file discovery string(/dev/mapper/*)
2016-10-05 18:49:25.500: [ CSSD][1357584128]clssnmvDDiscThread: using discovery string /dev/mapper/* for initial discovery
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Discovery with str:/dev/mapper/*:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]UFS discovery with :/dev/mapper/*:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Execute glob on the string /dev/mapper/*
2016-10-05 18:49:25.500: [ SKGFD][1357584128]running stat on disk:/dev/mapper/arch
2016-10-05 18:49:25.500: [ SKGFD][1357584128]running stat on disk:/dev/mapper/vote_ocr
2016-10-05 18:49:25.500: [ SKGFD][1357584128]running stat on disk:/dev/mapper/data
2016-10-05 18:49:25.500: [ SKGFD][1357584128]running stat on disk:/dev/mapper/vg01-LogVol02
2016-10-05 18:49:25.500: [ SKGFD][1357584128]running stat on disk:/dev/mapper/vg01-Log_ora
2016-10-05 18:49:25.500: [ SKGFD][1357584128]running stat on disk:/dev/mapper/vg01-LogVol00
2016-10-05 18:49:25.500: [ SKGFD][1357584128]running stat on disk:/dev/mapper/control
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Fetching UFS disk :/dev/mapper/control:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Fetching UFS disk :/dev/mapper/vg01-LogVol00:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Fetching UFS disk :/dev/mapper/vg01-Log_ora:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Fetching UFS disk :/dev/mapper/vg01-LogVol02:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Fetching UFS disk :/dev/mapper/data:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Fetching UFS disk :/dev/mapper/vote_ocr:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]Fetching UFS disk :/dev/mapper/arch:
2016-10-05 18:49:25.500: [ SKGFD][1357584128]OSS discovery with :/dev/mapper/*:
2016-10-05 18:49:25.500: [ CSSD][1357584128]clssnmvDiskVerify: Successful discovery of 0 disks
2016-10-05 18:49:25.500: [ CSSD][1357584128]clssnmCompleteInitVFDiscovery: Completing initial voting file discovery
2016-10-05 18:49:25.500: [ CSSD][1357584128]clssnmvFindInitialConfigs: No voting files found
2016-10-05 18:49:25.500: [ CSSD][1357584128](:CSSNM00070:)clssnmCompleteInitVFDiscovery: Voting file not found. Retrying discovery in 15 seconds
2016-10-05 18:49:25.862: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d4806ed00
2016-10-05 18:49:25.863: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d4806ed00
2016-10-05 18:49:25.863: [ CSSD][1360275200]clssgmRegisterClient: proc(3/0x7f3d4806ed00), client(89/0x7f3d480630a0)
2016-10-05 18:49:25.863: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d4806ed00) client(0x7f3d480630a0)
2016-10-05 18:49:25.863: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x17d6
2016-10-05 18:49:26.028: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d48084620
2016-10-05 18:49:26.028: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d48084620
2016-10-05 18:49:26.028: [ CSSD][1360275200]clssgmRegisterClient: proc(4/0x7f3d48084620), client(89/0x7f3d48096b90)
2016-10-05 18:49:26.028: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d48084620) client(0x7f3d48096b90)
2016-10-05 18:49:26.028: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x17ec
2016-10-05 18:49:26.866: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d4806ed00
2016-10-05 18:49:26.866: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d4806ed00
2016-10-05 18:49:26.866: [ CSSD][1360275200]clssgmRegisterClient: proc(3/0x7f3d4806ed00), client(90/0x7f3d480916b0)
2016-10-05 18:49:26.866: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d4806ed00) client(0x7f3d480916b0)
2016-10-05 18:49:26.866: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x1802
2016-10-05 18:49:27.031: [ CSSD][1360275200]clssscSelect: cookie accept request 0x7f3d48084620
2016-10-05 18:49:27.031: [ CSSD][1360275200]clssscevtypSHRCON: getting client with cmproc 0x7f3d48084620
2016-10-05 18:49:27.031: [ CSSD][1360275200]clssgmRegisterClient: proc(4/0x7f3d48084620), client(90/0x7f3d480630a0)
2016-10-05 18:49:27.031: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(6) size(684) only connect and exit messages are allowed before lease acquisition proc(0x7f3d48084620) client(0x7f3d480630a0)
2016-10-05 18:49:27.031: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x1818
2016-10-05 18:49:27.291: [ CSSD][1360275200]clssgmExecuteClientRequest(): type(37) size(80) only connect and exit messages are allowed before lease acquisition proc(0x7f3d480717d0) client((nil))
2016-10-05 18:49:27.293: [ CSSD][1360275200]clssgmDeadProc: proc 0x7f3d480717d0
2016-10-05 18:49:27.293: [ CSSD][1360275200]clssgmDestroyProc: cleaning up proc(0x7f3d480717d0) con(0x1730) skgpid ospid 3053 with 0 clients, refcount 0
2016-10-05 18:49:27.293: [ CSSD][1360275200]clssgmDiscEndpcl: gipcDestroy 0x1730
^C
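Note: the trace above is the root cause. CSS takes its voting-file discovery string from the GPnP profile (/dev/mapper/*), stats every /dev/mapper device, and ends with "Successful discovery of 0 disks", because on this system the ASM headers are only reachable through the udev-created /dev/asm-* devices checked next with kfed. A rough way to see what discovery string the profile currently carries, assuming the 11.2 gpnptool and that the attribute is named DiscoveryString in the profile XML:

# as the grid owner (oragrid)
/grid/grid_home/bin/gpnptool get 2>/dev/null | grep -o 'DiscoveryString="[^"]*"'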
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# pwd
/grid/grid_home/log/node1
[root@node1 node1]# ls -l /dev/asm-*
brw-rw---- 1 oragrid asmadmin 8, 48 Oct 5 18:44 /dev/asm-diskc
brw-rw---- 1 oragrid asmadmin 8, 64 Oct 5 18:44 /dev/asm-diske
brw-rw---- 1 oragrid asmadmin 8, 112 Oct 5 18:44 /dev/asm-diskg
[root@node1 node1]# kfed read /dev/asm-diskc| grep kfdhdb.grpname
kfdhdb.grpname: OCRVOTE ; 0x048: length=7
[root@node1 node1]# kfed read /dev/asm-diske| grep kfdhdb.grpname
kfdhdb.grpname: DATA ; 0x048: length=4
[root@node1 node1]# kfed read /dev/asm-diskg| grep kfdhdb.grpname
kfdhdb.grpname: ARCH ; 0x048: length=4
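The kfed reads show that all three /dev/asm-* devices still carry valid ASM disk headers (OCRVOTE, DATA, ARCH), so the storage itself is intact and only discovery is broken. A small loop that checks the group name and header status of each device in one pass; kfdhdb.hdrsts is the header-status field as it appears in 11.2 kfed dumps and should show KFDHDR_MEMBER for a healthy member disk:

for d in /dev/asm-diskc /dev/asm-diske /dev/asm-diskg; do
  echo "== $d"
  kfed read $d | egrep 'kfdhdb.grpname|kfdhdb.hdrsts'
done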
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4530: Communications failure contacting Cluster Synchronization Services daemon
CRS-4534: Cannot communicate with Event Manager
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# ssh node2
root@node2's password:
Last login: Wed Oct 5 18:44:42 2016 from node1
[root@node2 ~]# crsctl check crs
-bash: crsctl: command not found
[root@node2 ~]# su - oragrid
[oragrid@node2 ~]$ cd $ORACLE_HOME/bin
[oragrid@node2 bin]$ su
Password:
[root@node2 bin]# crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4530: Communications failure contacting Cluster Synchronization Services daemon
CRS-4534: Cannot communicate with Event Manager
[root@node2 bin]# exit
exit
[oragrid@node2 bin]$ exit
logout
[root@node2 ~]# exit
logout
Connection to node2 closed.
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# whoami
root
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# crsctl stop has -f
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'node1'
CRS-2673: Attempting to stop 'ora.mdnsd' on 'node1'
CRS-2677: Stop of 'ora.mdnsd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.drivers.acfs' on 'node1'
CRS-2673: Attempting to stop 'ora.crf' on 'node1'
CRS-2677: Stop of 'ora.drivers.acfs' on 'node1' succeeded
CRS-2677: Stop of 'ora.crf' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'node1'
CRS-2677: Stop of 'ora.gipcd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'node1'
CRS-2677: Stop of 'ora.gpnpd' on 'node1' succeeded
CRS-2793: Shutdown of Oracle High Availability Services-managed resources on 'node1' has completed
CRS-4133: Oracle High Availability Services has been stopped.
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# crsctl start crs -excl -nocrs
CRS-4123: Oracle High Availability Services has been started.
CRS-2672: Attempting to start 'ora.mdnsd' on 'node1'
CRS-2676: Start of 'ora.mdnsd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'node1'
CRS-2676: Start of 'ora.gpnpd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'node1'
CRS-2672: Attempting to start 'ora.gipcd' on 'node1'
CRS-2676: Start of 'ora.cssdmonitor' on 'node1' succeeded
CRS-2676: Start of 'ora.gipcd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'node1'
CRS-2672: Attempting to start 'ora.diskmon' on 'node1'
CRS-2676: Start of 'ora.diskmon' on 'node1' succeeded
CRS-2676: Start of 'ora.cssd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.drivers.acfs' on 'node1'
CRS-2679: Attempting to clean 'ora.cluster_interconnect.haip' on 'node1'
CRS-2672: Attempting to start 'ora.ctssd' on 'node1'
CRS-2681: Clean of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'node1'
CRS-2676: Start of 'ora.drivers.acfs' on 'node1' succeeded
CRS-2676: Start of 'ora.ctssd' on 'node1' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'node1'
CRS-2676: Start of 'ora.asm' on 'node1' succeeded
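At this point the lower stack (CSSD, CTSSD, HAIP, ASM) is running in exclusive mode on node1 only and CRSD is intentionally left down, which is what allows the OCR/voting disk group to be repaired. A sketch for inspecting the init-level resources while in this mode, run as root from the same grid home:

/grid/grid_home/bin/crsctl stat res -t -init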
[root@node1 node1]#
[root@node1 node1]#
[root@node1 node1]# exit
exit
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$ echo $ORACLE_SID
+ASM1
[oragrid@node1 cssd]$ sqlplus /nolog
SQL*Plus: Release 11.2.0.4.0 Production on Wed Oct 5 19:00:05 2016
Copyright (c) 1982, 2013, Oracle. All rights reserved.
SQL> connect /as sysasm
Connected.
SQL> select open_mode from v$instance;
select open_mode from v$instance
*
ERROR at line 1:
ORA-00904: "OPEN_MODE": invalid identifier
SQL> desc v$instance;
Name Null? Type
----------------------------------------- -------- ----------------------------
INSTANCE_NUMBER NUMBER
INSTANCE_NAME VARCHAR2(16)
HOST_NAME VARCHAR2(64)
VERSION VARCHAR2(17)
STARTUP_TIME DATE
STATUS VARCHAR2(12)
PARALLEL VARCHAR2(3)
THREAD# NUMBER
ARCHIVER VARCHAR2(7)
LOG_SWITCH_WAIT VARCHAR2(15)
LOGINS VARCHAR2(10)
SHUTDOWN_PENDING VARCHAR2(3)
DATABASE_STATUS VARCHAR2(17)
INSTANCE_ROLE VARCHAR2(18)
ACTIVE_STATE VARCHAR2(9)
BLOCKED VARCHAR2(3)
SQL> select status from v$instance;
STATUS
------------
STARTED
SQL>
SQL>
SQL>
SQL>
SQL> exit
Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Real Application Clusters and Automatic Storage Management options
[oragrid@node1 cssd]$ ps -ef | grep -i asm
oragrid 8024 1 0 18:59 ? 00:00:00 asm_pmon_+ASM1
oragrid 8026 1 0 18:59 ? 00:00:00 asm_psp0_+ASM1
oragrid 8028 1 2 18:59 ? 00:00:02 asm_vktm_+ASM1
oragrid 8032 1 0 18:59 ? 00:00:00 asm_gen0_+ASM1
oragrid 8034 1 0 18:59 ? 00:00:00 asm_diag_+ASM1
oragrid 8036 1 0 18:59 ? 00:00:00 asm_ping_+ASM1
oragrid 8038 1 0 18:59 ? 00:00:00 asm_dia0_+ASM1
oragrid 8040 1 0 18:59 ? 00:00:00 asm_lmon_+ASM1
oragrid 8042 1 0 18:59 ? 00:00:00 asm_lmd0_+ASM1
oragrid 8044 1 0 18:59 ? 00:00:00 asm_lms0_+ASM1
oragrid 8048 1 0 18:59 ? 00:00:00 asm_lmhb_+ASM1
oragrid 8050 1 0 18:59 ? 00:00:00 asm_mman_+ASM1
oragrid 8052 1 0 18:59 ? 00:00:00 asm_dbw0_+ASM1
oragrid 8054 1 0 18:59 ? 00:00:00 asm_lgwr_+ASM1
oragrid 8056 1 0 18:59 ? 00:00:00 asm_ckpt_+ASM1
oragrid 8058 1 0 18:59 ? 00:00:00 asm_smon_+ASM1
oragrid 8060 1 0 18:59 ? 00:00:00 asm_rbal_+ASM1
oragrid 8062 1 0 18:59 ? 00:00:00 asm_gmon_+ASM1
oragrid 8064 1 0 18:59 ? 00:00:00 asm_mmon_+ASM1
oragrid 8066 1 0 18:59 ? 00:00:00 asm_mmnl_+ASM1
oragrid 8068 1 0 18:59 ? 00:00:00 asm_lck0_+ASM1
oragrid 8099 1 0 18:59 ? 00:00:00 oracle+ASM1 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
oragrid 8209 6865 0 19:00 pts/0 00:00:00 grep -i asm
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$ sqlplus /nolog
SQL*Plus: Release 11.2.0.4.0 Production on Wed Oct 5 19:00:53 2016
Copyright (c) 1982, 2013, Oracle. All rights reserved.
SQL> connect /as sysasm
Connected.
SQL> show parameter disk
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
asm_diskgroups string
asm_diskstring string
SQL> show parameter spfile;
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
spfile string
SQL>
SQL>
SQL> show parameter
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
_dummy_instance boolean TRUE
asm_diskgroups string
asm_diskstring string
asm_power_limit integer 1
asm_preferred_read_failure_groups string
audit_file_dest string /grid/grid_home/rdbms/audit
audit_sys_operations boolean FALSE
audit_syslog_level string
background_core_dump string partial
background_dump_dest string /grid/grid_home/log/diag/asm/+
asm/+ASM1/trace
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
cluster_database boolean TRUE
cluster_database_instances integer 4
cluster_interconnects string
core_dump_dest string /grid/grid_home/log/diag/asm/+
asm/+ASM1/cdump
cpu_count integer 4
db_cache_size big integer 0
db_ultra_safe string OFF
db_unique_name string +ASM
diagnostic_dest string /grid/grid_home/log
event string
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
file_mapping boolean FALSE
filesystemio_options string none
ifile file
instance_name string +ASM1
instance_number integer 1
instance_type string ASM
large_pool_size big integer 0
ldap_directory_sysauth string no
listener_networks string
local_listener string
lock_name_space string
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
lock_sga boolean FALSE
max_dump_file_size string unlimited
memory_max_target big integer 1076M
memory_target big integer 1076M
nls_calendar string
nls_comp string BINARY
nls_currency string
nls_date_format string
nls_date_language string
nls_dual_currency string
nls_iso_currency string
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
nls_language string AMERICAN
nls_length_semantics string BYTE
nls_nchar_conv_excp string FALSE
nls_numeric_characters string
nls_sort string
nls_territory string AMERICA
nls_time_format string
nls_time_tz_format string
nls_timestamp_format string
nls_timestamp_tz_format string
os_authent_prefix string ops$
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
os_roles boolean FALSE
parallel_execution_message_size integer 16384
pga_aggregate_target big integer 0
processes integer 360
remote_listener string
remote_login_passwordfile string EXCLUSIVE
remote_os_authent boolean FALSE
remote_os_roles boolean FALSE
service_names string +ASM
sessions integer 564
sga_max_size big integer 1088M
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
sga_target big integer 0
shadow_core_dump string partial
shared_pool_reserved_size big integer 30M
shared_pool_size big integer 0
sort_area_size integer 65536
spfile string
sql_trace boolean FALSE
statistics_level string TYPICAL
timed_os_statistics integer 0
timed_statistics boolean TRUE
trace_enabled boolean TRUE
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
use_large_pages string TRUE
user_dump_dest string /grid/grid_home/log/diag/asm/+
asm/+ASM1/trace
workarea_size_policy string AUTO
SQL>
SQL>
SQL>
SQL>
SQL> alter system set asm_diskstring='/dev/asm-*';
System altered.
SQL> alter diskgroup data mount;
Diskgroup altered.
SQL> alter diskgroup ocrvote mount;
Diskgroup altered.
SQL> alter diskgroup arch mount;
Diskgroup altered.
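All three disk groups mount once asm_diskstring points at /dev/asm-*, confirming nothing inside ASM was lost. Two quick sanity queries that could be run here (standard v$ views in 11.2; HEADER_STATUS should report MEMBER for each disk):

SQL> select path, header_status, mount_status from v$asm_disk;
SQL> select name, state, total_mb, free_mb from v$asm_diskgroup;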
SQL> show parameter spfile;
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
spfile string
SQL> show parameter disk
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
asm_diskgroups string DATA, OCRVOTE, ARCH
asm_diskstring string /dev/asm-*
SQL> create spfile from memory;
create spfile from memory
*
ERROR at line 1:
ORA-17502: ksfdcre:4 Failed to create file
+OCRVOTE/node-cluster/asmparameterfile/registry.253.917395543
ORA-15177: cannot operate on system aliases
SQL> create spfile from memory;
File created.
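The first CREATE SPFILE FROM MEMORY fails because it targets the spfile location registered in the GPnP profile, a system-generated alias under +OCRVOTE that cannot be written to directly (ORA-15177); the retry appears to fall back to a plain file under the grid home, which the SHOW PARAMETER SPFILE after the next restart reports as /grid/grid_home/dbs/spfile+ASM1.ora. A sketch to confirm that local file from the OS side (path taken from the transcript):

ls -l /grid/grid_home/dbs/spfile+ASM1.ora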
SQL> startup force mount;
ORA-32004: obsolete or deprecated parameter(s) specified for ASM instance
ASM instance started
Total System Global Area 1135747072 bytes
Fixed Size 2260728 bytes
Variable Size 1108320520 bytes
ASM Cache 25165824 bytes
ASM diskgroups mounted
ASM diskgroups volume enabled
SQL> show parameter spfile
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
spfile string /grid/grid_home/dbs/spfile+ASM
1.ora
SQL> show parameter disk
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
asm_diskgroups string DATA, OCRVOTE, ARCH
asm_diskstring string /dev/asm-*
SQL>
SQL>
SQL>
SQL>
SQL> create spfile='+OCRVOTE' FROM pfile;
create spfile='+OCRVOTE' FROM pfile
*
ERROR at line 1:
ORA-01078: failure in processing system parameters
LRM-00109: could not open parameter file '/grid/grid_home/dbs/init+ASM1.ora'
SQL> create pfile from spfile;
File created.
SQL> create spfile='+OCRVOTE' FROM pfile;
File created.
SQL> startup force;
ORA-32004: obsolete or deprecated parameter(s) specified for ASM instance
ASM instance started
Total System Global Area 1135747072 bytes
Fixed Size 2260728 bytes
Variable Size 1108320520 bytes
ASM Cache 25165824 bytes
ASM diskgroups mounted
ASM diskgroups volume enabled
SQL> show parameter spfile;
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
spfile string +OCRVOTE/node-cluster/asmparam
eterfile/registry.253.92446232
5
SQL> exit
Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Real Application Clusters and Automatic Storage Management options
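With the spfile recreated as +OCRVOTE/node-cluster/asmparameterfile/registry.253.924462325, the GPnP profile's spfile entry should now point at that registry file as well. A sketch of how this could be cross-checked with asmcmd, assuming the 11.2 spget command; the expected output is the same path SHOW PARAMETER SPFILE reported above:

asmcmd spget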
[oragrid@node1 cssd]$ asmcmd
ASMCMD> lsdg
State Type Rebal Sector Block AU Total_MB Free_MB Req_mir_free_MB Usable_file_MB Offline_disks Voting_files Name
MOUNTED EXTERN N 512 4096 1048576 8544 8135 0 8135 0 N ARCH/
MOUNTED EXTERN N 512 4096 1048576 11296 8543 0 8543 0 N DATA/
MOUNTED EXTERN N 512 4096 1048576 5728 5364 0 5364 0 N OCRVOTE/
ASMCMD> lsdsk
Path
/dev/asm-diskc
/dev/asm-diske
/dev/asm-diskg
ASMCMD> pwd
+
ASMCMD> ls
ARCH/
DATA/
OCRVOTE/
ASMCMD> cd ocrvote
ASMCMD> ls
node-cluster/
ASMCMD> cd node-cluster
ASMCMD> ls
ASMPARAMETERFILE/
OCRFILE/
ASMCMD> cd asmparameterfile
ASMCMD> ls
REGISTRY.253.924462325
ASMCMD> exit
[oragrid@node1 cssd]$ crsctl query css votedisk
Located 0 voting disk(s).
[oragrid@node1 cssd]$ ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 3
Total space (kbytes) : 262120
Used space (kbytes) : 3100
Available space (kbytes) : 259020
ID : 16088418
Device/File Name : +OCRVOTE
Device/File integrity check succeeded
Device/File not configured
Device/File not configured
Device/File not configured
Device/File not configured
Cluster registry integrity check succeeded
Logical corruption check bypassed due to non-privileged user
[oragrid@node1 cssd]$ crsctl replace votedisk +OCRVOTE
Successful addition of voting disk 8a8c153dabef4f71bfe5c3ebc4074c1e.
Successfully replaced voting disk group with +OCRVOTE.
CRS-4266: Voting file(s) successfully replaced
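The voting file is back inside +OCRVOTE. Since the OCR lives in the same disk group, taking a fresh manual OCR backup before restarting the full stack would be a reasonable extra step; a sketch using the standard 11.2 ocrconfig options, run as root:

/grid/grid_home/bin/ocrconfig -manualbackup
/grid/grid_home/bin/ocrconfig -showbackup manual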
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$ crsctl query css votedisk
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 8a8c153dabef4f71bfe5c3ebc4074c1e (/dev/asm-diskc) [OCRVOTE]
Located 1 voting disk(s).
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$
[oragrid@node1 cssd]$ pwd
/grid/grid_home/log/node1/cssd
[oragrid@node1 cssd]$ su
Password:
[root@node1 cssd]#
[root@node1 cssd]#
[root@node1 cssd]# crsctl stop cluster -all
CRS-2673: Attempting to stop 'ora.ctssd' on 'node1'
CRS-2673: Attempting to stop 'ora.cluster_interconnect.haip' on 'node1'
CRS-2677: Stop of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2796: The command may not proceed when Cluster Ready Services is not running
CRS-2677: Stop of 'ora.ctssd' on 'node1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'node1'
CRS-2677: Stop of 'ora.cssd' on 'node1' succeeded
[root@node1 cssd]# crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4530: Communications failure contacting Cluster Synchronization Services daemon
CRS-4534: Cannot communicate with Event Manager
**************************************************************
node2:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4530: Communications failure contacting Cluster Synchronization Services daemon
CRS-4534: Cannot communicate with Event Manager
**************************************************************
[root@node1 cssd]# crsctl start cluster -all
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'node1'
CRS-2676: Start of 'ora.cssdmonitor' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'node1'
CRS-2672: Attempting to start 'ora.diskmon' on 'node1'
CRS-2676: Start of 'ora.diskmon' on 'node1' succeeded
CRS-2676: Start of 'ora.cssd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.ctssd' on 'node1'
CRS-2676: Start of 'ora.ctssd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.evmd' on 'node1'
CRS-2672: Attempting to start 'ora.cluster_interconnect.haip' on 'node1'
CRS-5702: Resource 'ora.evmd' is already running on 'node2'
CRS-2676: Start of 'ora.evmd' on 'node1' succeeded
CRS-2676: Start of 'ora.cluster_interconnect.haip' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'node1'
CRS-2676: Start of 'ora.asm' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'node1'
CRS-2676: Start of 'ora.crsd' on 'node1' succeeded
CRS-5702: Resource 'ora.crsd' is already running on 'node2'
[root@node1 cssd]# crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[root@node1 cssd]# crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[root@node1 cssd]# crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[root@node1 cssd]# ssh node2
root@node2's password:
Last login: Wed Oct 5 18:55:56 2016 from node1
[root@node2 ~]# shutdown -r now
Broadcast message from root@node2
(/dev/pts/1) at 19:14 ...
The system is going down for reboot NOW!
[root@node2 ~]# exit
logout
Connection to node2 closed.
[root@node1 cssd]# shutdown -r now
Broadcast message from root@node1
(/dev/pts/0) at 19:14 ...
The system is going down for reboot NOW!
[BEGIN] 2016/10/5 11:15:23
[c:\~]$ ping 192.168.1.155
Pinging 192.168.1.155 with 32 bytes of data:
Reply from 192.168.1.155: bytes=32 time<1ms TTL=64
Reply from 192.168.1.155: bytes=32 time<1ms TTL=64
Ping statistics for 192.168.1.155:
Packets: Sent = 2, Received = 2, Lost = 0 (0% loss),
Approximate round trip times in milli-seconds:
Minimum = 0ms, Maximum = 0ms, Average = 0ms
[c:\~]$ ssh 192.168.1.155
Connecting to 192.168.1.155:22...
Connection established.
To escape to local shell, press 'Ctrl+Alt+]'.
Last login: Wed Oct 5 18:59:04 2016 from 192.168.1.1
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# ps -ef | grep d.bin
root 2587 1 5 19:15 ? 00:00:01 /grid/grid_home/bin/ohasd.bin reboot
oragrid 2914 1 0 19:16 ? 00:00:00 /grid/grid_home/bin/mdnsd.bin
oragrid 2927 1 1 19:16 ? 00:00:00 /grid/grid_home/bin/gpnpd.bin
oragrid 2937 1 1 19:16 ? 00:00:00 /grid/grid_home/bin/gipcd.bin
root 2951 1 2 19:16 ? 00:00:00 /grid/grid_home/bin/osysmond.bin
oragrid 2995 1 2 19:16 ? 00:00:00 /grid/grid_home/bin/ocssd.bin
root 3127 3001 0 19:16 pts/0 00:00:00 grep d.bin
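After the reboot the stack is still coming up: ocssd.bin is running but crsd.bin and evmd.bin are not yet in the process list, which matches the partial crsctl check cluster output that follows. Instead of polling, the Clusterware alert log could be tailed; the path below assumes the default 11.2 layout under this grid home:

tail -f /grid/grid_home/log/node1/alertnode1.log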
[root@node1 ~]# su - oragrid
[oragrid@node1 ~]$ crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4534: Cannot communicate with Event Manager
**************************************************************
[oragrid@node1 ~]$ crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4534: Cannot communicate with Event Manager
**************************************************************
[oragrid@node1 ~]$ crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4534: Cannot communicate with Event Manager
**************************************************************
[oragrid@node1 ~]$ crsctl check cluster -all
**************************************************************
node1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4534: Cannot communicate with Event Manager
**************************************************************
node2:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4530: Communications failure contacting Cluster Synchronization Services daemon
CRS-4534: Cannot communicate with Event Manager
**************************************************************
[oragrid@node1 ~]$ crsctl check cluster -all
**************************************************************
node1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[oragrid@node1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.ARCH.dg ora....up.type ONLINE ONLINE node1
ora.DATA.dg ora....up.type ONLINE ONLINE node1
ora....ER.lsnr ora....er.type ONLINE ONLINE node1
ora....N1.lsnr ora....er.type ONLINE ONLINE node1
ora.OCRVOTE.dg ora....up.type ONLINE ONLINE node1
ora.asm ora.asm.type ONLINE ONLINE node1
ora.cvu ora.cvu.type ONLINE ONLINE node1
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE node1
ora....SM1.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application OFFLINE OFFLINE
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip ora....t1.type ONLINE ONLINE node1
ora....SM2.asm application ONLINE ONLINE node2
ora....E2.lsnr application ONLINE ONLINE node2
ora.node2.gsd application OFFLINE OFFLINE
ora.node2.ons application ONLINE OFFLINE
ora.node2.vip ora....t1.type ONLINE ONLINE node2
ora.oc4j ora.oc4j.type ONLINE ONLINE node1
ora.ons ora.ons.type ONLINE ONLINE node1
ora.orcc.db ora....se.type ONLINE OFFLINE
ora....ry.acfs ora....fs.type ONLINE ONLINE node1
ora.scan1.vip ora....ip.type ONLINE ONLINE node1
[oragrid@node1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.ARCH.dg ora....up.type ONLINE ONLINE node1
ora.DATA.dg ora....up.type ONLINE ONLINE node1
ora....ER.lsnr ora....er.type ONLINE ONLINE node1
ora....N1.lsnr ora....er.type ONLINE ONLINE node1
ora.OCRVOTE.dg ora....up.type ONLINE ONLINE node1
ora.asm ora.asm.type ONLINE ONLINE node1
ora.cvu ora.cvu.type ONLINE ONLINE node1
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE node1
ora....SM1.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application OFFLINE OFFLINE
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip ora....t1.type ONLINE ONLINE node1
ora....SM2.asm application ONLINE ONLINE node2
ora....E2.lsnr application ONLINE ONLINE node2
ora.node2.gsd application OFFLINE OFFLINE
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip ora....t1.type ONLINE ONLINE node2
ora.oc4j ora.oc4j.type ONLINE ONLINE node1
ora.ons ora.ons.type ONLINE ONLINE node1
ora.orcc.db ora....se.type ONLINE ONLINE node1
ora....ry.acfs ora....fs.type ONLINE ONLINE node1
ora.scan1.vip ora....ip.type ONLINE ONLINE node1
[oragrid@node1 ~]$ asmcmd
ASMCMD> lsdsk
Path
/dev/asm-diskc
/dev/asm-diske
/dev/asm-diskg
ASMCMD> exit
[oragrid@node1 ~]$ ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 3
Total space (kbytes) : 262120
Used space (kbytes) : 3100
Available space (kbytes) : 259020
ID : 16088418
Device/File Name : +OCRVOTE
Device/File integrity check succeeded
Device/File not configured
Device/File not configured
Device/File not configured
Device/File not configured
Cluster registry integrity check succeeded
Logical corruption check bypassed due to non-privileged user
[oragrid@node1 ~]$ crsctl query css votedisk
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 8a8c153dabef4f71bfe5c3ebc4074c1e (/dev/asm-diskc) [OCRVOTE]
Located 1 voting disk(s).
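The cluster is back to its original state: OCR and the voting file live in +OCRVOTE and ASM discovers its disks through /dev/asm-*. To keep this from recurring, the asm_diskstring value (now persisted in the spfile inside +OCRVOTE) and the udev rules that create the /dev/asm-* devices with oragrid:asmadmin ownership must both survive reboots on every node. The rule below is only a hypothetical illustration of the usual RHEL 6 style entry; the WWID is a placeholder, not a value from this system:

# /etc/udev/rules.d/99-oracle-asmdevices.rules (hypothetical example)
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="<wwid-of-ocrvote-lun>", NAME="asm-diskc", OWNER="oragrid", GROUP="asmadmin", MODE="0660"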
[oragrid@node1 ~]$
[oragrid@node1 ~]$
[oragrid@node1 ~]$
[oragrid@node1 ~]$
[END] 2016/10/5 11:18:41