cache_options), cb, cbe)
cfg.worker_q.put(r)
+ @staticmethod
+ def _repair_raid_lv(lv_uuid, lv_name, new_pvs, repair_options):
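+ # Runs from the worker queue: resolve the LV and any destination PVs,
+ # then execute the repair.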
+ # Make sure we have a dbus object representing the LV
+ pv_dests = []
+ dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
+
+ # If we have PVs, verify them
+ if len(new_pvs):
+ for pv in new_pvs:
+ pv_dbus_obj = cfg.om.get_object_by_path(pv)
+ if not pv_dbus_obj:
+ raise dbus.exceptions.DBusException(
+ LV_INTERFACE,
+ 'PV Destination (%s) not found' % pv)
+
+ pv_dests.append(pv_dbus_obj.lvm_id)
+
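+ # handle_execute raises a D-Bus exception if the repair command fails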
+ LvCommon.handle_execute(*cmdhandler.lv_raid_repair(
+ dbo.lvm_id, pv_dests, repair_options))
+ return "/"
+
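+ # D-Bus entry point: takes an array of destination PV object paths, a
+ # timeout and a dict of extra repair options; the actual work is queued
+ # on the worker thread and the result is delivered via the callbacks.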
+ @dbus.service.method(
+ dbus_interface=LV_INTERFACE,
+ in_signature='aoia{sv}',
+ out_signature='o',
+ async_callbacks=('cb', 'cbe'))
+ def RepairRaidLv(self, new_pvs, tmo, repair_options, cb, cbe):
+ r = RequestEntry(
+ tmo, Lv._repair_raid_lv,
+ (self.Uuid, self.lvm_id, new_pvs,
+ repair_options), cb, cbe, return_tuple=False)
+ cfg.worker_q.put(r)
+
# noinspection PyPep8Naming
@utils.dbus_property(VDO_POOL_INTERFACE, 'OperatingMode', 's')
vg.Remove(dbus.Int32(g_tmo), EOD))
self._check_consistency()
- def _pv_remove(self, pv):
+ def _pv_remove(self, pv, force=False):
+ if force:
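+ # passing '--force' as both key and value expands to "--force --force"
+ # on the pvremove command line; '--yes' answers the confirmation prompt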
+ options = dbus.Dictionary({'--force': '--force', '--yes': ''}, 'sv')
+ else:
+ options = EOD
rc = self.handle_return(
- pv.Pv.Remove(dbus.Int32(g_tmo), EOD))
+ pv.Pv.Remove(dbus.Int32(g_tmo), options))
return rc
def test_pv_remove_add(self):
self.handle_return(vg.Vg.Remove(dbus.Int32(g_tmo), EOD))
+ def test_lv_raid_repair(self):
+ if len(self.objs[PV_INT]) < 3:
+ self.skipTest("we need at least 3 PVs to run the RAID repair test")
+
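+ # create a VG from the first two PVs and a 16 MiB raid1 LV on it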
+ lv_name = lv_n()
+ vg = self._vg_create(pv_paths=[self.objs[PV_INT][0].object_path,
+ self.objs[PV_INT][1].object_path]).Vg
+ lv = self._test_lv_create(
+ vg.LvCreateRaid,
+ (dbus.String(lv_name), dbus.String('raid1'), dbus.UInt64(mib(16)),
+ dbus.UInt32(0), dbus.UInt32(0), dbus.Int32(g_tmo), EOD),
+ vg, LV_BASE_INT)
+
+ # deactivate the RAID LV (we can't force-remove a PV while LVs are active)
+ self.handle_return(lv.Lv.Deactivate(
+ dbus.UInt64(0), dbus.Int32(g_tmo), EOD))
+ lv.update()
+ self.assertFalse(lv.LvCommon.Active)
+
+ # remove second PV using --force --force
+ self._pv_remove(self.objs[PV_INT][1], force=True)
+
+ # activate the RAID LV (can't repair inactive LVs)
+ self.handle_return(lv.Lv.Activate(
+ dbus.UInt64(0), dbus.Int32(g_tmo), EOD))
+ lv.update()
+ self.assertTrue(lv.LvCommon.Active)
+
+ # LV should be "broken" now
+ self.assertEqual(lv.LvCommon.Health[1], "partial")
+
+ # add the third PV to the VG
+ path = self.handle_return(vg.Extend(
+ dbus.Array([self.objs[PV_INT][2].object_path], signature="o"),
+ dbus.Int32(g_tmo), EOD))
+ self.assertEqual(path, '/')
+
+ # repair the RAID LV using the third PV
+ rc = self.handle_return(
+ lv.Lv.RepairRaidLv(
+ dbus.Array([self.objs[PV_INT][2].object_path], 'o'),
+ dbus.Int32(g_tmo), EOD))
+
+ self.assertEqual(rc, '/')
+ self._check_consistency()
+
+ lv.update()
+
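+ # the repair should have cleared the "partial" health state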
+ self.assertEqual(lv.LvCommon.Health[1], "unspecified")
+
+ # cleanup: drop the missing PV from the VG and re-create it so we end up with a clean PV
+ call_lvm(["vgreduce", "--removemissing", vg.Name])
+ self._pv_create(self.objs[PV_INT][1].Pv.Name)
+
def _test_lv_method_interface(self, lv):
self._rename_lv_test(lv)
self._test_activate_deactivate(lv)