The following document describes the procedure used to
build and configure an existing AIX system as an Oracle
database server. It is assumed the base AIX system was built
according to standard policies and procedures and exists as a
virtual client LPAR in a VIO environment.
- Log in to the LPAR and create SSH keys for
password-less logins and command execution.
Oracle LPAR
ssh-keygen -t rsa
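# For the later scp/ssh steps in this procedure to run without a password,
# the public key of the host issuing those commands must also be present in
# the LPAR's authorized_keys for root. A minimal sketch, run from that host
# (an assumption; this step is not shown in the original procedure):
cat ~/.ssh/id_rsa.pub | ssh root@${LPAR_NAME} \
  "mkdir -p ~/.ssh; cat >> ~/.ssh/authorized_keys; chmod 600 ~/.ssh/authorized_keys"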
- Build a "dolvs" script for the LPAR and name it using the
following syntax: "dolvs.${LPAR_NAME}.sh". An example of this script is
shown here that builds multiple resource groups, each with multiple volume
groups. A unique resource group section should be configured in this
script for each resource group that will be supported on the Oracle
LPAR. Additionally, a paging resource group is configured to contain the
paging space LUNs.
Oracle LPAR: dolvs.${LPAR_NAME}.sh
#!/usr/bin/ksh93
################################################################
# douser: derive deterministic numeric UID/GID values from the user and
# group names, remove any pre-existing definitions, then recreate the
# group and user.
douser()
{
UID=$( print "${U}" | sum -r )
UID="$(( ${UID// /} + 0 ))"
GID=$( print "${G}" | sum -r )
GID="$(( ${GID// /} + 0 ))"
rmuser ${U}
rmgroup ${G}
print "${G}=${GID}"
mkgroup id=${GID} ${G}
print "${U}=${UID}"
mkuser id=${UID} pgrp=${G} ${U}
lsuser ${U}
}
################################################################
# dovg: rebuild the volume group for the current resource group section,
# create the jfs2 log and data logical volumes, create and mount the
# filesystem, then set ownership and the Oracle user's home directory.
dovg()
{
umount ${MTPT}
/usr/sbin/varyoffvg ${RG}${VGID}
/usr/sbin/exportvg ${RG}${VGID}
mkvg -f -y ${RG}${VGID} -V ${VGMJ} ${VGDISKS}
chvg -a n ${RG}${VGID}
chvg -c ${RG}${VGID}
varyoffvg ${RG}${VGID}
exportvg ${RG}${VGID}
importvg -y ${RG}${VGID} -V ${VGMJ} ${VGDISKS%%[$' \t\n']*}
varyonvg ${RG}${VGID}
/usr/sbin/mklv -y ${RG}${LGID}lv -t jfs2log -a e "${RG}${VGID}" 1
LVSIZE=$(( $( lsvg ${RG}${VGID} | grep "FREE PPs:" |
sed -e "s/.*FREE PPs: *//g;s/ (.*//g" ) ))
print "${LVSIZE}"
/usr/sbin/mklv -y ${RG}${LVID}lv -t jfs2 -a e "${RG}${VGID}" ${LVSIZE%%.*}
/usr/sbin/crfs -v jfs2 -d "${RG}${LVID}lv" -m "${MTPT}" -A y \
-p rw -a agblksize=4096 -a logname="${RG}${LGID}lv"
chown -R ${U}:${G} /${RG}
ls -ld ${MTPT}
mount ${MTPT}
ls -ld ${MTPT}
mkdir -p "${MTPT}/app/oracle/admin"
chuser home="${MTPT}/app/oracle/admin" ${U}
chown -R ${U}:${G} ${MTPT}
lsuser -a home ${U}
ls -ld ${MTPT}
print
lsvg
print
df
print
}
################################################################
RG="eegadtu0"
U=oraeeg
G=dbaeeg
VGMJ="430"
# VGDISKS=$( lscfg -l 'hdisk*' | egrep -- '-C240-|-C245-' | awk '{ print $1 }' )
VGDISKS="hdisk2 hdisk3 hdisk4 hdisk5"
VGID="01vg"
LVID="ora1"
LGID="jfs1"
MTPT="/${RG}/u01"
[[ "_${VGDISKS}" == "_" ]] && exit 1
douser
dovg
################################################################
RG="eegadtu0"
U=oraeeg
G=dbaeeg
VGMJ="431"
# VGDISKS=$( lscfg -l 'hdisk*' | egrep -- '-C240-|-C245-' | awk '{ print $1 }' )
VGDISKS="hdisk6 hdisk7 hdisk8 hdisk9 hdisk10 hdisk11 hdisk12 hdisk13 hdisk14 hdisk15"
VGID="02vg"
LVID="ora2"
LGID="jfs2"
MTPT="/${RG}/u02"
[[ "_${VGDISKS}" == "_" ]] && exit 1
# douser
dovg
################################################################
RG="eegadtu0"
U=oraeeg
G=dbaeeg
VGMJ="432"
# VGDISKS=$( lscfg -l 'hdisk*' | egrep -- '-C240-|-C245-' | awk '{ print $1 }' )
VGDISKS="hdisk16 hdisk17 hdisk18 hdisk19 hdisk20 hdisk21 hdisk22 hdisk23 hdisk24 hdisk25"
VGID="03vg"
LVID="ora3"
LGID="jfs3"
MTPT="/${RG}/u03"
[[ "_${VGDISKS}" == "_" ]] && exit 1
# douser
dovg
################################################################
RG="sfaadtu0"
U=orasfa
G=dbasfa
VGMJ="433"
# VGDISKS=$( lscfg -l 'hdisk*' | egrep -- '-C250-|-C255-' | awk '{ print $1 }' )
VGDISKS="hdisk26 hdisk27 hdisk28"
VGID="01vg"
LVID="ora1"
LGID="jfs1"
MTPT="/${RG}/u01"
[[ "_${VGDISKS}" == "_" ]] && exit 1
douser
dovg
################################################################
RG="sfaadtu0"
U=orasfa
G=dbasfa
VGMJ="434"
# VGDISKS=$( lscfg -l 'hdisk*' | egrep -- '-C250-|-C255-' | awk '{ print $1 }' )
VGDISKS="hdisk29 hdisk30 hdisk31"
VGID="02vg"
LVID="ora2"
LGID="jfs2"
MTPT="/${RG}/u02"
[[ "_${VGDISKS}" == "_" ]] && exit 1
# douser
dovg
################################################################
################################################################
####
#### Paging Space
####
RG="pagadtu1"
PGMJ="911"
PGDISKS=$( lscfg -l 'hdisk*' | egrep -- '-C58-|-C59-' | awk '{ print $1 }' )
[[ "_${PGDISKS}" == "_" ]] && exit 1
/usr/sbin/varyoffvg ${RG}00vg
/usr/sbin/exportvg ${RG}00vg
/usr/sbin/mkvg -f -S -y ${RG}00vg -V ${PGMJ} ${PGDISKS}
LVSIZE=$(( $( lsvg ${RG}00vg | grep "FREE PPs:" |
sed -e "s/.*FREE PPs: *//g;s/ (.*//g" ) + 0 ))
print "${LVSIZE}"
/usr/sbin/mklv -y ${RG}pag0lv -t paging -a e "${RG}00vg" ${LVSIZE%%.*}
chps -a y ${RG}pag0lv
lsps -a
- Edit and transfer the "dolvs.${LPAR_NAME}.sh" script to the Oracle
LPAR. Execute this script on the Oracle LPAR to build the resource
groups, volume groups, logical volumes, filesystems and users.
Oracle LPAR
scp dolvs.${LPAR_NAME}.sh root@${LPAR_NAME}:.
ssh root@${LPAR_NAME} chmod 755 ./dolvs.${LPAR_NAME}.sh
ssh root@${LPAR_NAME} ./dolvs.${LPAR_NAME}.sh
- Remove any existing disks and virtual SCSI adapters
on the Oracle LPAR, then reconfigure and rediscover the Virtual SCSI
adapters and disks.
Oracle LPAR
for i in $( lsdev -Cc disk -F name )
do
rmdev -Rdl ${i}
done
for i in $( lsdev -Cc adapter -F name | grep vscsi )
do
rmdev -Rdl ${i}
done
cfgmgr
- Make sure all disks have a PVID assigned to them.
Oracle LPAR
for i in $( lsdev -Cc disk -F name )
do
chdev -l ${i} -a pv=yes
done
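# Confirm that every disk now reports a PVID in the second column (a
# suggested verification, not part of the original procedure):
lspv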
- Edit and transfer the "vscsiPriority.ksh" script to the Oracle
LPAR. Execute this script on the Oracle LPAR to prioritize the
communication paths to each disk (an illustrative sketch of the idea
follows the commands below).
Oracle LPAR
ssh root@${LPAR_NAME} mkdir -p /usr/local/scripts
scp vscsiPriority.ksh root@${LPAR_NAME}:/usr/local/scripts
ssh root@${LPAR_NAME} chmod 755 /usr/local/scripts/vscsiPriority.ksh
ssh root@${LPAR_NAME} /usr/local/scripts/vscsiPriority.ksh -vori 20
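# The contents of vscsiPriority.ksh are site specific and are not reproduced
# here. A minimal illustration of the underlying idea (an assumption, not the
# actual script): alternate each disk's MPIO path priority between its vscsi
# parent adapters so that I/O load is spread across both VIO servers.
N=0
for DISK in $( lsdev -Cc disk -F name )
do
    P=0
    for PARENT in $( lspath -l ${DISK} -F parent )
    do
        # priority=1 is the preferred path; alternate which adapter gets it
        chpath -l ${DISK} -p ${PARENT} -a priority=$(( ( ( N + P ) % 2 ) + 1 ))
        (( P = P + 1 ))
    done
    (( N = N + 1 ))
done
lspath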
- Set the SMTP mail relay host to "mail-relay.txu.com"
Oracle LPAR
sed -e "s/^DS.*/DSmail-relay.txu.com/g" /etc/sendmail/sendmail.cf > /tmp/tmp${$}.out
sed -e "s/^#DS.*/DSmail-relay.txu.com/g" /tmp/tmp${$}.out > /etc/sendmail/sendmail.cf
rm -f /tmp/tmp${$}.out
refresh -s sendmail
- Configure the network time protocol daemon and refresh it
Oracle LPAR
print "server 146.61.47.25 prefer
server tick.cs.unlv.edu
server wuarchive.wustl.edu
tracefile /etc/ntp.trace" > /etc/ntp.conf
refresh -s xntpd
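# "refresh" only works if xntpd is already active; if "lssrc -s xntpd" shows
# it as inoperative, start it with startsrc instead (a suggested check, not
# part of the original procedure):
lssrc -s xntpd
# startsrc -s xntpd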
- Set the "ulimits" for each Oracle user
Oracle LPAR
for UNAME in $( grep "^ora" /etc/passwd | awk -F: '{ print $1 }' )
do
chuser cpu=-1 ${UNAME}
chuser fsize=-1 ${UNAME}
chuser data=-1 ${UNAME}
chuser stack=32768 ${UNAME}
chuser rss=-1 ${UNAME}
chuser core=2097151 ${UNAME}
chuser nofiles=2000 ${UNAME}
grep -p "${UNAME}:" /etc/security/limits
done
- Manually edit the "/etc/passwd" file to change each Oracle user's home
directory to its appropriate location (a scriptable chuser alternative
is sketched below):
Oracle LPAR
# "/mtpt/u01/app/oracle/admin"
vi /etc/passwd
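# A scriptable alternative to hand-editing /etc/passwd (an assumption, not
# part of the original procedure): set each home directory with chuser, for
# example:
chuser home="/eegadtu0/u01/app/oracle/admin" oraeeg
chuser home="/sfaadtu0/u01/app/oracle/admin" orasfa
lsuser -a home oraeeg orasfa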
- To prepare for the Oracle installation, create the "hagsuser" group
Oracle LPAR
mkgroup hagsuser
- Allow the Oracle users to schedule cron jobs by adding their
user names to the "cron.allow" file.
Oracle LPAR
print "root" > /var/adm/cron/cron.allow
for UNAME in $( grep "^ora" /etc/passwd | awk -F: '{ print $1 }' )
do
print "${UNAME}" >> /var/adm/cron/cron.allow
done
- Configure WLM to run in passive mode and configure it to be started
at boot time from the /etc/inittab
Oracle LPAR
nohup wlmcntrl -p > /dev/console 2>&1 &
rmitab wlm
mkitab "wlm:2:once:wlmcntrl -p > /dev/console 2>&1"
- Configure system and asynchronous I/O kernel parameters
Oracle LPAR
# Maximum number of processes per user = 3000
chdev -l sys0 -a maxuproc=3000 -P
# Maximum Async I/O requests = 16386
chdev -l aio0 -a maxreqs=16386 -P
# Maximum number of Async I/O servers = 300
chdev -l aio0 -a maxservers=300 -P
# Minimum number of Async I/O servers = 150
chdev -l aio0 -a minservers=150 -P
chdev -l aio0 -a autoconfig='available'
mkdev -l aio0
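# The -P changes above are recorded in the ODM and take effect after the
# reboot performed later in this procedure; the recorded values can be
# reviewed now (a suggested check, not part of the original procedure):
lsattr -El sys0 -a maxuproc
lsattr -El aio0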
- Configure performance monitoring on the LPAR, with the performance data
stored on the NIM server. The specific NIM server will depend upon the
datacenter in which the LPAR resides.
Oracle LPAR
# Begin by starting a Korn Shell 93 session
/usr/bin/ksh93
# Set a shell variable to contain the hostname of the Oracle LPAR
AIXSYS="$( hostname )"
AIXSYS="${AIXSYS%%.*}"
# Set a shell variable to contain the hostname of the NIM Server.
# In this example "ddcapnim01" is used, this should be changed to
# the actual NIM server desired.
NFSSRV="ddcapnim01"
print "AIXSYS=${AIXSYS} NFSSRV=${NFSSRV}"
# Set an array of values to contain the performance data mount
# points for each NIM Server
typeset -A MTPT
MTPT[mdcapnim01]="/prfpmce0"
MTPT[ddcapnim01]="/prfdmce0"
- Create an NFS mount for the performance data on the Oracle LPAR and
mount the filesystem.
Oracle LPAR
umount ${MTPT[${NFSSRV}]}
/usr/sbin/rmnfsmnt -f ${MTPT[${NFSSRV}]} -B
/usr/sbin/mknfsmnt -f ${MTPT[${NFSSRV}]} -d ${MTPT[${NFSSRV}]} -h ${NFSSRV} -M sys -t rw -B -A -w bg -Y -X -H -j -q -g
mount ${MTPT[${NFSSRV}]}
grep -p "${MTPT[${NFSSRV}]}:" /etc/filesystems
df
- Start xmwlm and xmtrend from /etc/inittab at run level "a" so they start with HACMP after NFS is available
Oracle LPAR
rmitab ptxwlm
rmitab ptxtrend
mkitab "ptxwlm:a:respawn:/usr/bin/xmwlm -d ${MTPT[${NFSSRV}]}/${AIXSYS}/wlm -n xmwlm > /dev/null 2>&1 # Start xmwlm"
mkitab "ptxtrend:a:respawn:/usr/bin/xmtrend -f /etc/perf/jtopas.cf -d ${MTPT[${NFSSRV}]}/${AIXSYS}/Top -n jtopas > /dev/null 2>&1 # Start trend"
egrep "^ptxwlm:|^ptxtrend:" /etc/inittab
- Kill any existing xmtrend or xmwlm processes and restart them
Oracle LPAR
for PID in $( ps -ef -F pid,args | egrep 'xmwlm|xmtrend' | grep -v grep | awk '{ print $1 }' )
do
print kill ${PID}
kill ${PID}
sleep 1
print kill -9 ${PID}
kill -9 ${PID}
done
ps -ef -F pid,args | egrep 'xmwlm|xmtrend'
mount ${MTPT[${NFSSRV}]}
mkdir -p ${MTPT[${NFSSRV}]}/${AIXSYS}/wlm
mkdir -p ${MTPT[${NFSSRV}]}/${AIXSYS}/Top
nohup /usr/bin/xmwlm -d ${MTPT[${NFSSRV}]}/${AIXSYS}/wlm -n xmwlm > /dev/null 2>&1 &
sleep 4
nohup /usr/bin/xmtrend -f /etc/perf/jtopas.cf -d ${MTPT[${NFSSRV}]}/${AIXSYS}/Top -n jtopas > /dev/null 2>&1 &
sleep 4
find ${MTPT[${NFSSRV}]}/${AIXSYS} -exec ls -ld {} \;
jobs
- Reboot the LPAR to ensure the AIO and kernel settings take effect and
the /etc/inittab entries are restarted at system startup.
Oracle LPAR
shutdown -Fr
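# After the reboot, a quick sanity check (a suggested verification, not part
# of the original procedure):
lsdev -l aio0
lsattr -El sys0 -a maxuproc
lssrc -s xntpd
ps -ef | egrep 'xmwlm|xmtrend' | grep -v grep
df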
- Create start scripts for each Oracle database. An example start
script is shown for the resource group "etapdtu0", database "EEXTPRD",
Oracle user "oraeta", and Oracle listener port "1571".
Oracle LPAR: /usr/local/hascripts/etapdtu0_oradb01_start.sh
#!/usr/bin/ksh93
################################################################
print "START SCRIPT BEGIN: ${0##*/}"
RG="etapdtu0"
DBNAME1="EEXTPRD"
UNAME="oraeta"
LSNRPORT="1571"
typeset -l WLMTAG="${DBNAME1}"
print "/${RG}/u01/app/oracle/product/9.2.0/bin/dbstart ${DBNAME1}" > /tmp/dbstart.${DBNAME1}.${$}.sh
chmod 755 /tmp/dbstart.${DBNAME1}.${$}.sh
su - ${UNAME} -c "/usr/local/bin/wlmsettag ${WLMTAG} /tmp/dbstart.${DBNAME1}.${$}.sh" &
su - ${UNAME} -c "/${RG}/u01/app/oracle/product/9.2.0/bin/lsnrctl start lsnr_${UNAME}_${LSNRPORT}" &
/usr/local/hascripts/rg_wlmassign.sh
print "START SCRIPT END: ${0##*/}"
exit 0
- Create stop scripts for each resource group of Oracle databases.
An example stop script is shown for the resource group "etapdtu0",
databases "EEXTPRD" and "ETSSPRD", Oracle user "oraeta", and Oracle
listener port "1571".
Oracle LPAR: /usr/local/hascripts/etapdtu0_oradb01_stop.sh
#!/usr/bin/ksh93
################################################################
print "STOP SCRIPT BEGIN: ${0##*/}"
# All databases running under the same user ID should be shut down
# together with a single "dbshut" command, so modify this section
# to include all database names. Two databases are shown here.
RG="etapdtu0"
DBNAME1="EEXTPRD"
DBNAME2="ETSSPRD"
UNAME="oraeta"
LSNRPORT="1571"
su - ${UNAME} -c "/${RG}/u01/app/oracle/product/9.2.0/bin/lsnrctl stop lsnr_${UNAME}_${LSNRPORT}" &
su - ${UNAME} -c "/${RG}/u01/app/oracle/product/9.2.0/bin/dbshut ${DBNAME1} ${DBNAME2}" &
CNT="0"
while (( $( jobs | wc -l ) > 0 ))
do
print "STOP SCRIPT KILL TIMER: $( date ): ${0##*/}"
if (( CNT >= 300 ))
then
for SIGNAL in TERM HUP INT ABRT KILL
do
ps -fu ${UNAME}
for PIDNUM in $( ps -fu ${UNAME} -F pid | grep -v PID )
do
print kill -${SIGNAL} ${PIDNUM}
kill -${SIGNAL} ${PIDNUM}
done
sleep 15
done
break
fi
sleep 15
(( CNT = CNT + 15 ))
done
while (( $( ps -fu ${UNAME} -F pid | grep -v PID | wc -l ) > 0 ))
do
print "STOP SCRIPT WAIT TIMER: $( date ): ${0##*/}"
sleep 15
(( CNT = CNT + 15 ))
(( CNT >= 300 )) && break
done
ps -fu ${UNAME}
if (( $( ps -fu ${UNAME} -F pid | grep -v PID | wc -l ) > 0 ))
then
for SIGNAL in TERM HUP INT ABRT KILL
do
print "STOP SCRIPT FINAL KILL: $( date ): ${0##*/}"
for PIDNUM in $( ps -fu ${UNAME} -F pid | grep -v PID )
do
print kill -${SIGNAL} ${PIDNUM}
kill -${SIGNAL} ${PIDNUM}
done
sleep 15
done
fi
sync
sync
sync
sync
print "STOP SCRIPT END: ${0##*/}"
exit 0
- Create an HACMP build/configuration script and customize it for the
specific cluster.
Oracle LPAR
#!/usr/bin/ksh93
#################################################################
NODE1="ddcaaora01"
RG1="eegadtu0"
RG1APPLS="${RG1}_oradb01 ${RG1}_oradb02 ${RG1}_oradb03"
RG1VGS="${RG1}01vg ${RG1}02vg ${RG1}03vg"
NODE2="ddcaaora02"
RG2="sfaadtu0"
RG2APPLS="${RG2}_oradb01 ${RG2}_oradb02"
RG2VGS="${RG2}01vg ${RG2}02vg"
################################################################
# Remove Existing Cluster and create a new one:
/usr/es/sbin/cluster/utilities/clrmclstr
/usr/es/sbin/cluster/utilities/clmodclstr -n ${NODE1%??}tu01 -p "${NODE1}-boot.tu.com ${NODE2}-boot.tu.com"
################################################################
# Create Service Addresses
/usr/es/sbin/cluster/utilities/clrmnode -a ${NODE1}.tu.com
/usr/es/sbin/cluster/utilities/clrmnode -a ${NODE2}.tu.com
/usr/es/sbin/cluster/utilities/clrmnode -a ${RG1}.tu.com
/usr/es/sbin/cluster/utilities/clrmnode -a ${RG2}.tu.com
/usr/es/sbin/cluster/utilities/claddnode -T service -B ${NODE1}.tu.com -w net_ether_01
/usr/es/sbin/cluster/utilities/claddnode -T service -B ${NODE2}.tu.com -w net_ether_01
/usr/es/sbin/cluster/utilities/claddnode -T service -B ${RG1}.tu.com -w net_ether_01
/usr/es/sbin/cluster/utilities/claddnode -T service -B ${RG2}.tu.com -w net_ether_01
################################################################
# Create Application Servers
for APPLS in ${RG1APPLS} ${RG2APPLS}
do
/usr/es/sbin/cluster/utilities/clrmserv ${APPLS}
/usr/es/sbin/cluster/utilities/claddserv -s ${APPLS} -b "/usr/local/hascripts/${APPLS}_start.sh" -e "/usr/local/hascripts/${APPLS}_stop.sh"
done
################################################################
# Create persistent addresses
/usr/es/sbin/cluster/utilities/clrmnode -a ${NODE1}-pers
/usr/es/sbin/cluster/utilities/clrmnode -a ${NODE2}-pers
/usr/es/sbin/cluster/utilities/claddnode -a ${NODE1}-pers.tu.com :ether :net_ether_01 : :persistent : : -n ${NODE1}
/usr/es/sbin/cluster/utilities/claddnode -a ${NODE2}-pers.tu.com :ether :net_ether_01 : :persistent : : -n ${NODE2}
################################################################
# Create resource groups
/usr/es/sbin/cluster/utilities/clrmgrp -g ${NODE1}
/usr/es/sbin/cluster/utilities/clrmgrp -g ${NODE2}
/usr/es/sbin/cluster/utilities/clrmgrp -g ${RG1}
/usr/es/sbin/cluster/utilities/clrmgrp -g ${RG2}
/usr/es/sbin/cluster/utilities/claddgrp -g ${NODE1} -n "${NODE1} ${NODE2}" -S OHN -O FNPN -B FBHPN
/usr/es/sbin/cluster/utilities/claddgrp -g ${NODE2} -n "${NODE2} ${NODE1}" -S OHN -O FNPN -B FBHPN
/usr/es/sbin/cluster/utilities/claddgrp -g ${RG1} -n "${NODE1} ${NODE2}" -S OHN -O FNPN -B NFB
/usr/es/sbin/cluster/utilities/claddgrp -g ${RG2} -n "${NODE2} ${NODE1}" -S OHN -O FNPN -B NFB
################################################################
# Add the Service address to the resource group
/usr/es/sbin/cluster/utilities/claddres -g "${NODE1}" \
FALLBACK_AT= \
SERVICE_LABEL="${NODE1}" \
APPLICATIONS= \
VOLUME_GROUP= \
FORCED_VARYON="false" \
VG_AUTO_IMPORT="false" \
FILESYSTEM= \
FSCHECK_TOOL="fsck" \
RECOVERY_METHOD="sequential" \
FS_BEFORE_IPADDR="false" \
EXPORT_FILESYSTEM= \
MOUNT_FILESYSTEM= \
NFS_NETWORK= \
SHARED_TAPE_RESOURCES= \
DISK= \
AIX_FAST_CONNECT_SERVICES= \
COMMUNICATION_LINKS= \
MISC_DATA=
/usr/es/sbin/cluster/utilities/claddres -g "${NODE2}" \
FALLBACK_AT= \
SERVICE_LABEL="${NODE2}" \
APPLICATIONS= \
VOLUME_GROUP= \
FORCED_VARYON="false" \
VG_AUTO_IMPORT="false" \
FILESYSTEM= \
FSCHECK_TOOL="fsck" \
RECOVERY_METHOD="sequential" \
FS_BEFORE_IPADDR="false" \
EXPORT_FILESYSTEM= \
MOUNT_FILESYSTEM= \
NFS_NETWORK= \
SHARED_TAPE_RESOURCES= \
DISK= \
AIX_FAST_CONNECT_SERVICES= \
COMMUNICATION_LINKS= \
MISC_DATA=
################################################################
# Add application servers and volumes to resource groups
/usr/es/sbin/cluster/utilities/claddres -g "${RG1}" \
FALLBACK_AT= \
SERVICE_LABEL="${RG1}" \
APPLICATIONS="${RG1APPLS}" \
VOLUME_GROUP="${RG1VGS}" \
FORCED_VARYON="false" \
VG_AUTO_IMPORT="false" \
FILESYSTEM= \
FSCHECK_TOOL="fsck" \
RECOVERY_METHOD="sequential" \
FS_BEFORE_IPADDR="false" \
EXPORT_FILESYSTEM= \
MOUNT_FILESYSTEM= \
NFS_NETWORK= \
SHARED_TAPE_RESOURCES= \
DISK= \
AIX_FAST_CONNECT_SERVICES= \
COMMUNICATION_LINKS= \
MISC_DATA=
/usr/es/sbin/cluster/utilities/claddres -g "${RG2}" \
FALLBACK_AT= \
SERVICE_LABEL="${RG2}" \
APPLICATIONS="${RG2APPLS}" \
VOLUME_GROUP="${RG2VGS}" \
FORCED_VARYON="false" \
VG_AUTO_IMPORT="false" \
FILESYSTEM= \
FSCHECK_TOOL="fsck" \
RECOVERY_METHOD="sequential" \
FS_BEFORE_IPADDR="false" \
EXPORT_FILESYSTEM= \
MOUNT_FILESYSTEM= \
NFS_NETWORK= \
SHARED_TAPE_RESOURCES= \
DISK= \
AIX_FAST_CONNECT_SERVICES= \
COMMUNICATION_LINKS= \
MISC_DATA=
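################################################################
# Review the resulting cluster definition (a suggested verification, not
# part of the original procedure):
/usr/es/sbin/cluster/utilities/cllsgrp
/usr/es/sbin/cluster/utilities/cllsserv
/usr/es/sbin/cluster/utilities/clshowres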
- Manually select and configure the concurrent disk heartbeat, then
synchronize and start the cluster.
Oracle LPAR
smitty cm_config_hacmp_communication_interfaces_devices_menu_dmn
smitty cm_ver_and_sync.select
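# Once verification/synchronization completes and cluster services have been
# started, the cluster state can be reviewed (a suggested check, not part of
# the original procedure):
lssrc -g cluster
/usr/es/sbin/cluster/utilities/cldump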
- Mount the Oracle CDs from the MDC NIM server
Oracle LPAR
mkdir /Oracle9201_Disk1
mkdir /Oracle9201_Disk2
mkdir /Oracle9201_Disk3
mkdir /Oracle9201_Disk4
mkdir /Oracle9206_Disk1
mkdir /Oracle9207_Disk1
mount mdcapnim01:/export/cdimages/Oracle9201_Disk1 /Oracle9201_Disk1
mount mdcapnim01:/export/cdimages/Oracle9201_Disk2 /Oracle9201_Disk2
mount mdcapnim01:/export/cdimages/Oracle9201_Disk3 /Oracle9201_Disk3
mount mdcapnim01:/export/cdimages/Oracle9201_Disk4 /Oracle9201_Disk4
mount mdcapnim01:/export/cdimages/Oracle9206_Disk1 /Oracle9206_Disk1
mount mdcapnim01:/export/cdimages/Oracle9207_Disk1 /Oracle9207_Disk1
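# Confirm all six CD images are mounted (a suggested check, not part of the
# original procedure):
mount | grep Oracle92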