3 # clvmd - Clustered LVM Daemon init script
6 # description: Cluster daemon for userland logical volume management tools.
8 # For Red-Hat-based distributions such as Fedora, RHEL, CentOS.
12 # Required-Start: $local_fs@CLVMD_CMANAGERS@
13 # Required-Stop: $local_fs@CLVMD_CMANAGERS@
# Short-Description: This service is the Clustered LVM Daemon.
15 # Description: Cluster daemon for userland logical volume management tools.
# Pull in the Red Hat init-script helper functions (action, status, ...).
. /etc/rc.d/init.d/functions

exec_prefix=@exec_prefix@
# NOTE(review): ${sbindir} is used below but its assignment
# (presumably "sbindir=@sbindir@" from the configure template) is not
# visible in this chunk -- confirm it is set before these lines.

# Paths to the LVM tools this script drives.
lvm_vgchange=${sbindir}/vgchange
lvm_vgdisplay=${sbindir}/vgdisplay
lvm_vgscan=${sbindir}/vgscan
lvm_lvdisplay=${sbindir}/lvdisplay
# Pull in cluster-wide and daemon-specific configuration, when present.
[ -f /etc/sysconfig/cluster ] && . /etc/sysconfig/cluster
[ -f "/etc/sysconfig/$DAEMON" ] && . "/etc/sysconfig/$DAEMON"

# Bind clvmd to a specific cluster interconnect interface if configured.
[ -n "$CLVMD_CLUSTER_IFACE" ] && CLVMDOPTS="$CLVMDOPTS -I $CLVMD_CLUSTER_IFACE"
# Allow up to $CLVMD_STOP_TIMEOUT seconds for clvmd to complete exit
# operations; default to 10 seconds.
# Fix: the original tested the misspelled variable $CLMVD_STOP_TIMEOUT
# (always empty), so a user-configured CLVMD_STOP_TIMEOUT was always
# clobbered back to 10.
[ -z "$CLVMD_STOP_TIMEOUT" ] && CLVMD_STOP_TIMEOUT=10

LOCK_FILE="/var/lock/subsys/$DAEMON"
# NOTE: replace this with vgs, once display filter per attr is implemented.
# Print the name of every clustered volume group, one per line.
# (The "clustered_vgs() {" header line is not visible in this chunk;
# reconstructed from the call sites below -- confirm against the full file.)
clustered_vgs() {
	${lvm_vgdisplay} 2>/dev/null | \
		awk 'BEGIN {RS="VG Name"} {if (/Clustered/) print $1;}'
}
# Print the name of every available (active) LV that belongs to a
# clustered VG, one per line.  The awk pattern skips LVs whose status
# line reads "NOT available".
# (The closing "done"/"}" are not visible in this chunk; reconstructed.)
clustered_active_lvs() {
	for i in $(clustered_vgs); do
		${lvm_lvdisplay} $i 2>/dev/null | \
			awk 'BEGIN {RS="LV Name"} {if (/[^N^O^T] available/) print $1;}'
	done
}
# Quiet status check: succeed iff the daemon is running, with all
# output from rh_status suppressed.
# (The "rh_status_q() {" header line is not visible in this chunk;
# reconstructed from the call sites below -- confirm against the full file.)
rh_status_q() {
	rh_status >/dev/null 2>&1
}
67 if ! rh_status_q
; then
68 echo -n "Starting $DAEMON: "
69 $DAEMON $CLVMDOPTS ||
return $?
73 # Refresh local cache.
75 # It's possible that new PVs were added to this, or other VGs
76 # while this node was down. So we run vgscan here to avoid
77 # any potential "Missing UUID" messages with subsequent
80 # The following step would be better and more informative to the user:
81 # 'action "Refreshing VG(s) local cache:" ${lvm_vgscan}'
82 # but it could show warnings such as:
83 # 'clvmd not running on node x-y-z Unable to obtain global lock.'
84 # and the action would be shown as FAILED when in reality it didn't.
85 # Ideally vgscan should have a startup mode that would not print
86 # unnecessary warnings.
88 ${lvm_vgscan} > /dev
/null
2>&1
90 action
"Activating VG(s):" ${lvm_vgchange} -ayl $LVM_VGS ||
return $?
100 while [ "$count" -le "$CLVMD_STOP_TIMEOUT" ] && \
111 rh_status_q ||
return 0
113 [ -z "$LVM_VGS" ] && LVM_VGS
="$(clustered_vgs)"
114 if [ -n "$LVM_VGS" ]; then
115 action
"Deactivating clustered VG(s):" ${lvm_vgchange} -anl $LVM_VGS ||
return $?
118 action
"Signaling $DAEMON to exit" kill -TERM $
(pidofproc
$DAEMON) ||
return $?
# wait half a second before entering the waiting loop, otherwise we would
# display the loop for longer than really necessary
124 # clvmd could take some time to stop
125 rh_status_q
&& action
"Waiting for $DAEMON to exit:" wait_for_finish
128 echo -n "$DAEMON failed to exit"
133 echo -n "$DAEMON terminated"
144 rh_status_q ||
exit 7
145 action
"Reloading $DAEMON configuration: " $DAEMON -R ||
return $?
149 # if stop fails, restart will return the error and not attempt
150 # another start. Even if start is protected by rh_status_q,
151 # that would avoid spawning another daemon, it would try to
152 # reactivate the VGs.
154 # Try to get clvmd to restart itself. This will preserve
156 action
"Restarting $DAEMON: " $DAEMON -S
158 # If that fails then do a normal stop & restart
168 [ "$EUID" != "0" ] && {
169 echo "clvmd init script can only be executed as root user"
173 # See how we were called.
185 restart|force-reload
)
190 condrestart|try-restart
)
191 rh_status_q ||
exit 0
204 if [ $rtrn = 0 ]; then
205 cvgs
="$(clustered_vgs)"
206 echo Clustered Volume Groups
: ${cvgs:-"(none)"}
207 clvs
="$(clustered_active_lvs)"
208 echo Active clustered Logical Volumes
: ${clvs:-"(none)"}
213 echo $
"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"