/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "metadata.h"
#include "activate.h"
#include "memlock.h"
#include "display.h"
#include "fs.h"
#include "lvm-exec.h"
#include "lvm-file.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "str_list.h"
#include "config.h"
#include "filter.h"
#include "segtype.h"
#include "sharedlib.h"

#include <limits.h>
#include <fcntl.h>
#include <unistd.h>

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)

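/*
 * Detect the presence of the LVM1 kernel driver, which registers
 * <proc_dir>/lvm/global when loaded.
 */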
int lvm1_present(struct cmd_context *cmd)
{
	char path[PATH_MAX];

	if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
	    < 0) {
		log_error("LVM1 proc global snprintf failed");
		return 0;
	}

	if (path_exists(path))
		return 1;
	else
		return 0;
}

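/*
 * Build the list of kernel modules needed to activate 'seg': the segment
 * type's own modules, modules for any snapshots hanging off an origin,
 * and (recursively) modules for any stacked AREA_LV sub-LVs.
 */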
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;
		}
	}

	return 1;
}

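/* Collect the modules needed by every segment of 'lv'. */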
int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules)
{
	struct lv_segment *seg;

	dm_list_iterate_items(seg, &lv->segments)
		if (!list_segment_modules(mem, seg, modules))
			return_0;

	return 1;
}

#ifndef DEVMAPPER_SUPPORT
void set_activation(int act)
{
	static int warned = 0;

	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}
int lv_is_active(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	return 0;
}
int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
#else /* DEVMAPPER_SUPPORT */

static int _activation = 1;

void set_activation(int act)
{
	if (act == _activation)
		return;

	_activation = act;
	if (_activation)
		log_verbose("Activation enabled. Device-mapper kernel "
			    "driver will be used.");
	else
		log_warn("WARNING: Activation disabled. No device-mapper "
			 "interaction will be attempted.");
}

int activation(void)
{
	return _activation;
}

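/*
 * Decide whether 'lv' may be activated according to
 * activation/volume_list in lvm.conf (or, failing that, host tags).
 * As parsed below, entries may name a VG, a VG/LV pair, a tag ("@tag"),
 * or "@*" to match any host tag.  An illustrative lvm.conf fragment:
 *
 *     activation {
 *         volume_list = [ "vg00", "vg01/lvol1", "@mytag", "@*" ]
 *     }
 */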
static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct dm_config_node *cn;
	const struct dm_config_value *cv;
	const char *str;
	char path[PATH_MAX];

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined, checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}
	else
		log_verbose("activation/volume_list configuration setting "
			    "defined, checking the list to match %s/%s",
			    lv->vg->name, lv->name);

	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in config file "
				  "activation/volume_list");
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file "
				  "activation/volume_list");
			continue;
		}

		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "activation/volume_list");
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in activation/volume_list configuration "
		    "setting matches %s/%s", lv->vg->name, lv->name);

	return 0;
}

int library_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	return dm_get_library_version(version, size);
}

int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}

int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_versions *target, *last_target;

	log_very_verbose("Getting target version for %s", target_name);
	if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
		return_0;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (!dm_task_run(dmt)) {
		log_debug("Failed to get %s target version", target_name);
		/* Assume this was because LIST_VERSIONS isn't supported */
		*maj = 0;
		*min = 0;
		*patchlevel = 0;
		r = 1;
		goto out;	/* Destroy dmt rather than leaking it here. */
	}

	target = dm_task_get_versions(dmt);

	do {
		last_target = target;

		if (!strcmp(target_name, target->name)) {
			r = 1;
			*maj = target->version[0];
			*min = target->version[1];
			*patchlevel = target->version[2];
			goto out;
		}

		target = (struct dm_versions *)((char *) target + target->next);
	} while (last_target != target);

out:
	dm_task_destroy(dmt);

	return r;
}

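/*
 * Try to load the kernel module backing a target by execing modprobe,
 * e.g. "modprobe dm-mirror" for the "mirror" target.  Compiled out when
 * MODPROBE_CMD is not defined.
 */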
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}

int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}

/*
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;

	if (!activation())
		return 0;
	/*
	 * If open_count info is requested, we have to be sure our own udev
	 * transactions are finished.
	 * For non-clustered locking types we are only interested in non-delete
	 * operations in progress - as only those could lead to opened files.
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	if (!dev_manager_info(lv->vg->cmd->mem, lv, origin_only ? "real" : NULL, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}

int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	int r;
	struct logical_volume *lv;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		return 0;

	if (!lv_is_origin(lv))
		origin_only = 0;

	r = lv_info(cmd, lv, origin_only, info, with_open_count, with_read_ahead);
	release_vg(lv->vg);

	return r;
}

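/*
 * Refuse to proceed if the LV is still in use.  When sysfs is available
 * this checks device holders and mounted filesystems; otherwise it falls
 * back to the device-mapper open_count alone.
 */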
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
			struct logical_volume *lv, struct lvinfo *info)
{
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Logical volume %s/%s in use.",
				  lv->vg->name, lv->name);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Logical volume %s/%s is used by another device.",
			  lv->vg->name, lv->name);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Logical volume %s/%s contains a filesystem in use.",
			  lv->vg->name, lv->name);
		return 0;
	}

	return 1;
}

/*
 * Returns 1 if the transient status was refreshed, else 0 on failure.
 */
int lv_check_transient(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_transient(dm, lv)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/* FIXME Merge with snapshot_percent */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If a mirrored LV is temporarily shrunk to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}

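/*
 * Small lv_info() wrappers: _lv_active() returns 1 if the LV has a live
 * mapping (or -1 if the query fails); _lv_open_count() returns the
 * device's open count (or -1 if the query fails).
 */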
static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
		stack;
		return -1;
	}

	return info.exists;
}

static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
		stack;
		return -1;
	}

	return info.open_count;
}

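/*
 * Thin wrappers around the dev_manager activation primitives.  The last
 * argument of dev_manager_create() apparently controls whether LVs
 * stacked on top of a PVMOVE LV are pulled into the operation; it is
 * switched off when acting on the PVMOVE LV itself (see the comment in
 * _lv_suspend_lv() below).
 */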
static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_activate(dm, lv, laopts)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
		       int *flush_required)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_deactivate(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_deactivate(dm, lv)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
			  int lockfs, int flush_required)
{
	int r;
	struct dev_manager *dm;

	/*
	 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
	 * device directly, we don't want to touch the devices that use it.
	 */
	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

/*
 * These two functions return the number of visible LVs that are activated
 * (respectively opened), or -1 on error.  FIXME Check this.
 */
int lvs_in_vg_activated(struct volume_group *vg)
{
	struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv))
			count += (_lv_active(vg->cmd, lvl->lv) == 1);

	log_debug("Counted %d active LVs in VG %s", count, vg->name);

	return count;
}

int lvs_in_vg_opened(const struct volume_group *vg)
{
	const struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv))
			count += (_lv_open_count(vg->cmd, lvl->lv) > 0);

	log_debug("Counted %d open LVs in VG %s", count, vg->name);

	return count;
}

/*
 * _lv_is_active
 * @lv: logical volume being queried
 * @locally: set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	     0		not active
 *	   1	   0	     0		active remotely
 *	   1	   0	     1		exclusive remotely
 *	   1	   1	     0		active locally and possibly remotely
 *	   1	   1	     1		exclusive locally (or local && !cluster)
 *
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		e = 1; /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	if (l) {
		if (exclusive)
			log_error("Unable to determine exclusivity of %s",
				  lv->name);
		goto out;
	}

	/* FIXME: Is this fallback alright? */
	if (activate_lv_excl(lv->vg->cmd, lv)) {
		if (!deactivate_lv(lv->vg->cmd, lv))
			stack;
		/* FIXME: locally & exclusive are undefined. */
		return 0;
	}
	/* FIXME: Check exclusive value here. */
out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}

int lv_is_active(struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}

int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	int l;
	return _lv_is_active(lv, &l, NULL) && !l;
}

int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}

int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}

#ifdef DMEVENTD
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							  const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);
	return NULL;
}

char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
	char *path;

	if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
		log_error("Failed to allocate dmeventd library path.");
		return NULL;
	}

	get_shared_library_path(cmd, libpath, path, PATH_MAX);

	return path;
}

int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	*pending = 0;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}

int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}

#endif

/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };

	if (!laopts)
		laopts = &zlaopts;

	/* Skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue; /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}

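/*
 * Context handed to _preload_detached_lv() while for_each_sub_lv() walks
 * the sub-LVs of the LV being suspended: any visible, active sub-LV that
 * still exists in the precommitted metadata gets its new table preloaded.
 */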
struct detached_lv_data {
	struct logical_volume *lv_pre;
	struct lv_activate_opts *laopts;
	int *flush_required;
};

static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
{
	struct detached_lv_data *detached = data;
	struct lv_list *lvl_pre;

	if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
		if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) && (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
		    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
			return_0;
	}

	return 1;
}

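/*
 * Suspend an LV: look up both the committed and the precommitted metadata,
 * preload the new tables (including PVMOVE dependencies and any snapshots
 * or detached sub-LVs that change), stop monitoring, then suspend the
 * device(s) inside a critical section.
 */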
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;
	struct detached_lv_data detached;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_origin(lv) || !lv_is_origin(lv_pre))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * Preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if (!(lv_pre->status & LOCKED) &&
	    (lv->status & LOCKED) &&
	    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
		/* Preload all the LVs above the PVMOVE LV */
		dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
				log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		}
		/* Now preload the PVMOVE LV itself */
		if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
			log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
			goto out;
		}
		if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
			goto_out;
	} else {
		if (!_lv_preload(lv_pre, laopts, &flush_required))
			/* FIXME Revert preloading */
			goto_out;

		/*
		 * Search for existing LVs that have become detached and preload them.
		 */
		detached.lv_pre = lv_pre;
		detached.laopts = laopts;
		detached.flush_required = &flush_required;

		if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
			goto_out;

		/*
		 * Preload any snapshots that are being removed.
		 */
		if (!laopts->origin_only && lv_is_origin(lv)) {
			dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
				if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
					log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
						  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
					goto out;
				}
				if (!lv_is_cow(lvl_pre->lv) &&
				    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}
		}
	}

	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		release_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Returns success if the device is not active */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	struct lv_activate_opts laopts = { .origin_only = origin_only };

	return _lv_suspend(cmd, lvid_s, &laopts, 0);
}

/* No longer used */
/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return _lv_suspend(cmd, lvid_s, 1);
}
***********/

/*
 * _lv_resume
 * @cmd
 * @lvid_s
 * @origin_only
 * @exclusive: This parameter only has an effect in a cluster context.
 *             It forces the local target type to be used (instead of
 *             the cluster-aware type).
 * @error_if_not_active
 */
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
		      struct lv_activate_opts *laopts, int error_if_not_active)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	if (!lv_is_origin(lv))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Resuming %s%s.", lv->name, laopts->origin_only ? " without snapshots" : "");
		r = 1;
		goto out;
	}

	log_debug("Resuming LV %s/%s%s%s.", lv->vg->name, lv->name,
		  error_if_not_active ? "" : " if active",
		  laopts->origin_only ? " without snapshots" : "");

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || !info.suspended) {
		if (error_if_not_active)
			goto_out;
		r = 1;
		if (!info.suspended)
			critical_section_dec(cmd, "already resumed");
		goto out;
	}

	if (!_lv_activate_lv(lv, laopts))
		goto_out;

	critical_section_dec(cmd, "resumed");

	if (!monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

	r = 1;
out:
	if (lv)
		release_vg(lv->vg);

	return r;
}

/* Returns success if the device is not active */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive)
{
	struct lv_activate_opts laopts = {
		.origin_only = origin_only,
		/*
		 * When targets are activated exclusively in a cluster, the
		 * non-clustered target should be used.  This only happens
		 * if exclusive is set.
		 */
		.exclusive = exclusive
	};

	return _lv_resume(cmd, lvid_s, &laopts, 0);
}

int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	struct lv_activate_opts laopts = { .origin_only = origin_only, };

	return _lv_resume(cmd, lvid_s, &laopts, 1);
}

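/*
 * Returns nonzero if any snapshot of 'lv' is open - or if a snapshot's
 * state could not be read, which is treated conservatively as "open".
 */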
static int _lv_has_open_snapshots(struct logical_volume *lv)
{
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0;

	dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
		if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
			r = 1;
			continue;
		}

		if (info.exists && info.open_count) {
			log_error("LV %s/%s has open snapshot %s: "
				  "not deactivating", lv->vg->name, lv->name,
				  snap_seg->cow->name);
			r = 1;
		}
	}

	return r;
}

int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	if (!info.exists) {
		r = 1;
		goto out;
	}

	if (lv_is_visible(lv)) {
		if (!lv_check_not_in_use(cmd, lv, &info))
			goto_out;

		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	struct logical_volume *lv;
	int r = 0;

	if (!activation()) {
		*activate_lv = 1;
		return 1;
	}

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (!_passes_activation_filter(cmd, lv)) {
		log_verbose("Not activating %s/%s since it does not pass "
			    "activation filter.", lv->vg->name, lv->name);
		*activate_lv = 0;
	} else
		*activate_lv = 1;
	r = 1;
out:
	if (lv)
		release_vg(lv->vg);

	return r;
}

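/*
 * Common activation path: refuse LVs that fail the activation filter,
 * partial LVs (unless --partial) and LVs with unrecognised segment types,
 * then load the tables and start monitoring inside a critical section.
 */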
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Activating %s/%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	if (info.exists && !info.suspended && info.live_table) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Activate LV */
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 0))
		return_0;

	return 1;
}

/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 1))
		return_0;

	return 1;
}

int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	int r = 1;

	if (!lv) {
		r = dm_mknodes(NULL);
		fs_unlock();
		return r;
	}

	if (!activation())
		return 1;

	r = dev_manager_mknodes(lv);

	fs_unlock();

	return r;
}

/*
 * Does PV use VG somewhere in its construction?
 * Returns 1 on failure.
 */
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	if (!activation())
		return 0;

	if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
		return 0;

	return dev_manager_device_uses_vg(pv->dev, vg);
}

void activation_release(void)
{
	dev_manager_release();
}

void activation_exit(void)
{
	dev_manager_exit();
}
#endif