Scenario:
- pdom name: m8pdom02, SRU Oracle Solaris 11.4.59.144.2.
- ldom name: ldom08, SRU Oracle Solaris 11.4.56.138.2.
- System Configuration: Oracle Corporation sun4v SPARC M8-8
Task Description:
I had to remove the zpool "dump" and create a new zpool "dump1" on a new, larger disk.
In one of the steps I ran into a BUG when trying to add a disk to the zpool.
Problem:
BUG 35280225 - libdiskmgt inuse_vxvm() falsely reports disk is part of a VxVM volume
Solution:
- The fix has been integrated into the Solaris development trunk build, which is tentatively scheduled to
be branched from and released as Solaris 11.4 SRU 60 in mid-to-late August 2023.
- In the interim, use the workaround: disable in-use checking by the Solaris disk management library
with NOINUSE_CHECK=1 (a minimal sketch follows this list).
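As a rough sketch of the workaround (assuming a root shell on the system where the check fails; pool and disk names here are placeholders), the variable can be prefixed to a single command or exported for the whole session:
NOINUSE_CHECK=1 zpool create <pool> <disk>
export NOINUSE_CHECK=1
The actual commands used in this case are shown in the step-by-step section below.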
Step by Step:
On the pdom:
root@m8pdom02:# zfs create -V300gb ldoms/ldom08_dump2
root@m8pdom02:# ldm add-vdsdev /dev/zvol/dsk/ldoms/ldom08_dump2 ldom08_dump2@primary-vds0
root@m8pdom02:# ldm add-vdisk dump2 ldom08_dump2@primary-vds0 ldom08
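Before switching to the ldom, a quick verification step (my addition, not part of the original transcript) is to confirm the new vdisk shows up under the domain's disk resources:
ldm list -o disk ldom08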
On the ldom:
With the format command I checked the name of the disk I had assigned to the ldom; it is c1d2:
c1d2 <SUN-DiskImage-300GB cyl 8531 alt 2 hd 96 sec 768>
/virtual-devices@100/channel-devices@200/disk@2
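A generic tip, not from the original session: piping an empty line into format lists the available disks and exits without entering the interactive menu:
echo | format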
When I try to create the zpool dump1 on disk c1d2 I get the following error, and this is where the BUG shows up:
root@ldom08:M8:~# zpool create dump1 c1d2
vdev verification failed: use -f to override the following errors:
/dev/dsk/c1d2s0 is part of a VxVM volume.
/dev/dsk/c1d2s1 is part of a VxVM volume.
/dev/dsk/c1d2s2 is part of a VxVM volume.
/dev/dsk/c1d2s3 is part of a VxVM volume.
/dev/dsk/c1d2s4 is part of a VxVM volume.
/dev/dsk/c1d2s5 is part of a VxVM volume.
/dev/dsk/c1d2s6 is part of a VxVM volume.
/dev/dsk/c1d2s7 is part of a VxVM volume.
Unable to build pool from specified devices: device already in use
root@ldom08:M8:~#
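Because NOINUSE_CHECK=1 disables all of libdiskmgt's in-use validation, it is worth confirming first that c1d2 really is not part of any VxVM disk group. A simple check (my addition, assuming VxVM is installed in this ldom, as it is here) is to run the following and verify the disk is not assigned to any disk group:
vxdisk list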
##### Here I apply the workaround and it lets me create the zpool
root@ldom08:M8:~# NOINUSE_CHECK=1 zpool create dump1 c1d2
root@ldom08:M8:~# zpool status dump1
pool: dump1
id: 17187728778697848574
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
dump1 ONLINE 0 0 0
c1d2 ONLINE 0 0 0
errors: No known data errors
root@ldom08a:M8:~#
root@ldom08a:M8:# zfs create -V270gb dump1/dump1
root@ldom08a:M8:# zfs list dump1
NAME USED AVAIL REFER MOUNTPOINT
dump1 279G 14.8G 288K /dump1
root@ldom08a:M8:#
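The 270 GB volume leaves only about 15 GB free in the 300 GB pool because, by default, a ZFS volume is created with a refreservation covering its full size plus metadata overhead (hence USED 279G for a 270G volsize). A quick way to see this (added check, not in the original session) is:
zfs get volsize,refreservation,usedbyrefreservation dump1/dump1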
##### The BUG also shows up here, when I try to use dumpadm
root@ldom08a:M8:# dumpadm -d /dev/zvol/dsk/dump1/dump1
dumpadm: /dev/zvol/dsk/dump1/dump1 is part of a VxVM volume.
root@ldom08a:M8:#
### Here I apply the workaround again
root@ldom08a:M8:# NOINUSE_CHECK=1 dumpadm -d /dev/zvol/dsk/dump1/dump1
Dump content : kernel without ZFS metadata
Dump device : /dev/zvol/dsk/dump1/dump1 (dedicated)
Savecore directory: /var/crash
Savecore enabled : no
Save compressed : on
Deferred Dump : on
root@ldom08a:M8:#
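Running dumpadm with no arguments prints the current crash dump configuration, which is a quick way to confirm the new dump device stuck (verification step added here):
dumpadm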
root@ldom08a:M8:# zfs destroy dump/dump
root@ldom08a:M8:# zfs get volsize dump1/dump1
NAME PROPERTY VALUE SOURCE
dump1/dump1 volsize 270G local
root@ldom08a:M8:#
root@ldom08a:M8:~# zpool destroy dump
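Before removing the backing vdisk on the pdom, it is worth confirming on the ldom that the old pool is really gone (added check); "dump" should no longer appear in:
zpool list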
On the pdom:
### Here I run into another error: the disk really is held by Veritas, through its multipathing (DMP)
root@m8pdom02:# ldm list-constraints ldom08|grep dump1
dump1 ldom08_dump1@primary-vds0 1
root@m8pdom02:# ldm rm-vdisk dump1 ldom08
Guest LDom returned the following reason for failing the operation:
Resource Information
-------------- -------------------------
/dev/dsk/c1d1 Device being used by VxVM
VIO operation failed because device is being used in LDom ldom08
Failed to remove vdisk instance
root@m8pdom02:#
We had to exclude it from Veritas inside the ldom:
root@ldom08a:M8:~# vxdisk list
DEVICE TYPE DISK GROUP STATUS
c1d0 auto:ZFS - - ZFS
c1d1 auto:ZFS - - ZFS <<---- this is the disk I want to remove
hitachi_vspg1k0_360a auto:cdsdisk SSD001 TEST1 online thinrclm
hitachi_vspg1k0_360b auto:cdsdisk SSD002 TEST1 online thinrclm
hitachi_vspg1k0_360c auto:cdsdisk SSD003 TEST2 online thinrclm
root@ldom08a:M8:~#
root@ldom08a:M8:~# vxdisk rm c1d1
root@ldom08a:M8:~# vxdisk list
DEVICE TYPE DISK GROUP STATUS
c1d0 auto:ZFS - - ZFS
hitachi_vspg1k0_360a auto:cdsdisk SSD001 TEST1 online thinrclm
hitachi_vspg1k0_360b auto:cdsdisk SSD002 TEST1 online thinrclm
hitachi_vspg1k0_360c auto:cdsdisk SSD003 TEST2 online thinrclm
root@ldom08a:M8:~#
root@ldom08a:M8:~# vxdmpadm getsubpaths
NAME STATE[A] PATH-TYPE[M] DMPNODENAME ENCLR-NAME CTLR ATTRS PRIORITY
=================================================================================================
c2t50060E800750BC62d0 ENABLED(A) - hitachi_vspg1k0_360a hitachi_vspg1k0 c2 - -
c3t50060E800750BC72d0 ENABLED(A) - hitachi_vspg1k0_360a hitachi_vspg1k0 c3 - -
c4t50060E800750BC64d0 ENABLED(A) - hitachi_vspg1k0_360a hitachi_vspg1k0 c4 - -
c5t50060E800750BC74d0 ENABLED(A) - hitachi_vspg1k0_360a hitachi_vspg1k0 c5 - -
c2t50060E800750BC62d1 ENABLED(A) - hitachi_vspg1k0_360b hitachi_vspg1k0 c2 - -
c3t50060E800750BC72d1 ENABLED(A) - hitachi_vspg1k0_360b hitachi_vspg1k0 c3 - -
c4t50060E800750BC64d1 ENABLED(A) - hitachi_vspg1k0_360b hitachi_vspg1k0 c4 - -
c5t50060E800750BC74d1 ENABLED(A) - hitachi_vspg1k0_360b hitachi_vspg1k0 c5 - -
c2t50060E800750BC62d2 ENABLED(A) - hitachi_vspg1k0_360c hitachi_vspg1k0 c2 - -
c3t50060E800750BC72d2 ENABLED(A) - hitachi_vspg1k0_360c hitachi_vspg1k0 c3 - -
c4t50060E800750BC64d2 ENABLED(A) - hitachi_vspg1k0_360c hitachi_vspg1k0 c4 - -
c5t50060E800750BC74d2 ENABLED(A) - hitachi_vspg1k0_360c hitachi_vspg1k0 c5 - -
c1d0 ENABLED(A) - c1d0 other_disks c1 - -
c1d1 ENABLED(A) - c1d1 other_disks c1 - -
root@ldom08a:M8:~# vxdmpadm exclude path=c1d1
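Re-running the same queries should now show c1d1 gone from both the VxVM device list and the DMP subpaths (verification added here, not part of the original transcript):
vxdisk list
vxdmpadm getsubpaths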
Now, back on the pdom:
root@m8pdom02:# ldm rm-vdisk dump1 ldom08
root@m8pdom02:# ldm rm-vdsdev ldom08_dump1@primary-vds0
root@m8pdom02:# zfs list|grep ldom08
ldoms/ldom08_dump1 103G 1.9T 800M -
ldoms/ldom08_dump2 309G 2.1T 47.5M -
ldoms/ldom08_vol1 155G 1.85T 101G -
root@m8pdom02:# zfs destroy ldoms/ldom08_dump1
root@m8pdom02:# zfs list|grep ldom08
ldoms/ldom08_dump2 309G 2.2T 47.5M -
ldoms/ldom08_vol1 155G 1.95T 101G -
ldoms/ldom08_vol1.old 155G 2.04T 10.4G -
root@m8pdom02:#
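As a final check (my addition), the constraint list should no longer show the old vdisk, i.e. this should return nothing:
ldm list-constraints ldom08 | grep dump1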