/*
 * lib/activate/activate.c — from lvm2.git (sourceware.org),
 * commit: "Thin add dmeventd support".
 */
1 /*
2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
4 *
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
10 *
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 */
15
16 #include "lib.h"
17 #include "metadata.h"
18 #include "activate.h"
19 #include "memlock.h"
20 #include "display.h"
21 #include "fs.h"
22 #include "lvm-exec.h"
23 #include "lvm-file.h"
24 #include "lvm-string.h"
25 #include "toolcontext.h"
26 #include "dev_manager.h"
27 #include "str_list.h"
28 #include "config.h"
29 #include "filter.h"
30 #include "segtype.h"
31 #include "sharedlib.h"
32
33 #include <limits.h>
34 #include <fcntl.h>
35 #include <unistd.h>
36
37 #define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
38
39 int lvm1_present(struct cmd_context *cmd)
40 {
41 static char path[PATH_MAX];
42
43 if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
44 < 0) {
45 log_error("LVM1 proc global snprintf failed");
46 return 0;
47 }
48
49 if (path_exists(path))
50 return 1;
51 else
52 return 0;
53 }
54
/*
 * Append the names of all kernel target modules needed to activate
 * segment "seg" (and any LVs stacked beneath it) to "modules".
 * Strings are allocated from "mem".  Returns 1 on success, 0 on failure.
 */
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	/* Modules required by this segment's own target type */
	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	/* A snapshot origin also needs the modules of each COW LV */
	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	/* A COW LV needs the modules of its snapshot segment type */
	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	/* Recurse into LVs used as segment areas (stacked devices) */
	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;
		}
	}

	return 1;
}
102
103 int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
104 struct dm_list *modules)
105 {
106 struct lv_segment *seg;
107
108 dm_list_iterate_items(seg, &lv->segments)
109 if (!list_segment_modules(mem, seg, modules))
110 return_0;
111
112 return 1;
113 }
114
#ifndef DEVMAPPER_SUPPORT
/*
 * Stub implementations used when LVM2 is built without libdevmapper.
 * Query functions report "nothing active / nothing available" (return 0)
 * while state-changing entry points succeed as no-ops (return 1) so that
 * callers can proceed.
 */
void set_activation(int act)
{
	static int warned = 0;	/* warn only once per process */

	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
/* NOTE(review): this stub takes an extra sysfs_dir argument compared with
 * the DEVMAPPER_SUPPORT implementation below - confirm against activate.h */
int lvm_dm_prefix_check(const char *sysfs_dir, int major, int minor, const char *prefix)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive, unsigned revert)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int lv_send_message(const struct logical_volume *lv, const char *message)
{
	return 0;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

int lv_is_active(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
/*
 * No-op stub used when compiled without libdevmapper support.
 * Fix: take "const struct lv_activate_opts *" so the signature matches
 * the real implementation in the DEVMAPPER_SUPPORT branch (and the
 * shared prototype) - a non-const pointer here is an incompatible
 * redeclaration.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
268 #else /* DEVMAPPER_SUPPORT */
269
270 static int _activation = 1;
271
272 void set_activation(int act)
273 {
274 if (act == _activation)
275 return;
276
277 _activation = act;
278 if (_activation)
279 log_verbose("Activation enabled. Device-mapper kernel "
280 "driver will be used.");
281 else
282 log_warn("WARNING: Activation disabled. No device-mapper "
283 "interaction will be attempted.");
284 }
285
/* Report whether device-mapper activation is currently enabled. */
int activation(void)
{
	return _activation;
}
290
/*
 * Decide whether "lv" may be activated on this host according to the
 * activation/volume_list configuration setting and host tags.
 * List entries may be "@tag", "@*", "vgname" or "vgname/lvname".
 * Returns 1 to permit activation, 0 to refuse it.
 */
static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct dm_config_node *cn;
	const struct dm_config_value *cv;
	const char *str;
	static char path[PATH_MAX];	/* scratch for "vg/lv" comparison */

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined, checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}
	else
		log_verbose("activation/volume_list configuration setting "
			    "defined, checking the list to match %s/%s",
			    lv->vg->name, lv->name);

	/* Walk the volume_list entries; first match wins */
	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in config file "
				  "activation/volume_list");
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file "
				  "activation/volume_list");
			continue;
		}


		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "activation/volume_list");
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in activation/volume_list configuration "
		    "setting matches %s/%s", lv->vg->name, lv->name);

	return 0;
}
385
386 int library_version(char *version, size_t size)
387 {
388 if (!activation())
389 return 0;
390
391 return dm_get_library_version(version, size);
392 }
393
394 int driver_version(char *version, size_t size)
395 {
396 if (!activation())
397 return 0;
398
399 log_very_verbose("Getting driver version");
400
401 return dm_driver_version(version, size);
402 }
403
404 int target_version(const char *target_name, uint32_t *maj,
405 uint32_t *min, uint32_t *patchlevel)
406 {
407 int r = 0;
408 struct dm_task *dmt;
409 struct dm_versions *target, *last_target;
410
411 log_very_verbose("Getting target version for %s", target_name);
412 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
413 return_0;
414
415 if (activation_checks() && !dm_task_enable_checks(dmt))
416 goto_out;
417
418 if (!dm_task_run(dmt)) {
419 log_debug("Failed to get %s target version", target_name);
420 /* Assume this was because LIST_VERSIONS isn't supported */
421 return 1;
422 }
423
424 target = dm_task_get_versions(dmt);
425
426 do {
427 last_target = target;
428
429 if (!strcmp(target_name, target->name)) {
430 r = 1;
431 *maj = target->version[0];
432 *min = target->version[1];
433 *patchlevel = target->version[2];
434 goto out;
435 }
436
437 target = (struct dm_versions *)((char *) target + target->next);
438 } while (last_target != target);
439
440 out:
441 dm_task_destroy(dmt);
442
443 return r;
444 }
445
446 int lvm_dm_prefix_check(int major, int minor, const char *prefix)
447 {
448 struct dm_task *dmt;
449 const char *uuid;
450 int r;
451
452 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
453 return_0;
454
455 if (!dm_task_set_minor(dmt, minor) ||
456 !dm_task_set_major(dmt, major) ||
457 !dm_task_run(dmt) ||
458 !(uuid = dm_task_get_uuid(dmt))) {
459 dm_task_destroy(dmt);
460 return 0;
461 }
462
463 r = strncasecmp(uuid, prefix, strlen(prefix));
464 dm_task_destroy(dmt);
465
466 return r ? 0 : 1;
467 }
468
/*
 * Attempt to load kernel module "dm-<target_name>" by running modprobe.
 * Returns the result of exec_cmd(), or 0 when MODPROBE_CMD was not
 * configured at build time or the module name does not fit the buffer.
 */
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
490
/*
 * Return 1 if kernel target "target_name" is available, else 0.
 * With use_modprobe set (and modprobe available at build time), try to
 * load module "dm-<target_name>" before checking again.
 */
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		/* Already present? */
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}
511
/*
 * Query device-mapper state for "lv" and fill in the caller's "info".
 * With origin_only set, the "real" layer of a snapshot origin is queried.
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;

	if (!activation())
		return 0;
	/*
	 * If open_count info is requested and we have to be sure our own udev
	 * transactions are finished
	 * For non-clustered locking type we are only interested for non-delete operation
	 * in progress - as only those could lead to opened files
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	if (!dev_manager_info(lv->vg->cmd->mem, lv, origin_only ? "real" : NULL, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	/* Copy the dm_info status fields into the caller's lvinfo */
	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}
550
551 int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
552 unsigned origin_only,
553 struct lvinfo *info, int with_open_count, int with_read_ahead)
554 {
555 int r;
556 struct logical_volume *lv;
557
558 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
559 return 0;
560
561 if (!lv_is_origin(lv))
562 origin_only = 0;
563
564 r = lv_info(cmd, lv, origin_only, info, with_open_count, with_read_ahead);
565 release_vg(lv->vg);
566
567 return r;
568 }
569
/*
 * Check that the LV's device is not in use (open, stacked upon, or
 * carrying a mounted filesystem).  Expects "info" already filled in
 * by lv_info().  Returns 1 if not in use (or device absent), 0 if busy.
 */
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
			struct logical_volume *lv, struct lvinfo *info)
{
	/* A device that does not exist cannot be in use */
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Logical volume %s/%s in use.",
				  lv->vg->name, lv->name);
			return 0;
		}

		return 1;
	}

	/* With sysfs, also detect devices stacked on top of this one */
	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Logical volume %s/%s is used by another device.",
			  lv->vg->name, lv->name);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Logical volume %s/%s contains a filesystem in use.",
			  lv->vg->name, lv->name);
		return 0;
	}

	return 1;
}
601
/*
 * Check/update the transient status of an active LV via dev_manager.
 * Returns 1 on success, else 0 on failure.
 */
605 int lv_check_transient(struct logical_volume *lv)
606 {
607 int r;
608 struct dev_manager *dm;
609
610 if (!activation())
611 return 0;
612
613 log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);
614
615 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
616 return_0;
617
618 if (!(r = dev_manager_transient(dm, lv)))
619 stack;
620
621 dev_manager_destroy(dm);
622
623 return r;
624 }
625
626 /*
627 * Returns 1 if percent set, else 0 on failure.
628 */
629 int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
630 {
631 int r;
632 struct dev_manager *dm;
633
634 if (!activation())
635 return 0;
636
637 log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);
638
639 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
640 return_0;
641
642 if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
643 stack;
644
645 dev_manager_destroy(dm);
646
647 return r;
648 }
649
/* FIXME Merge with snapshot_percent */
/*
 * Report the sync percentage of a mirrored LV.
 * Returns 1 with *percent set on success, else 0 on failure.
 */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If mirrored LV is temporarily shrunk to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	/* Device must exist before its status can be queried */
	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
686
/* RAID LVs reuse the mirror percentage reporting path (no wait, no event_nr). */
int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}
691
/* Return 1 if the LV's dm device exists, 0 if not, -1 on query failure. */
static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
		stack;
		return -1;
	}

	return info.exists;
}

/* Return the LV's device open count, or -1 on query failure. */
static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
		stack;
		return -1;
	}

	return info.open_count;
}
715
/*
 * Thin wrappers that create a dev_manager, perform one operation on the
 * LV's dm devices and destroy the manager again.  Each returns 1 on
 * success, 0 on failure.
 */

/* Load the LV's table(s) and make the device live. */
static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_activate(dm, lv, laopts)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

/* Preload the LV's new table without resuming; *flush_required reports
 * whether a flushing suspend will be needed. */
static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
		       int *flush_required)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

/* Remove the LV's dm device(s). */
static int _lv_deactivate(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_deactivate(dm, lv)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

/* Suspend the LV's dm device(s), optionally locking the filesystem
 * and/or flushing outstanding I/O first. */
static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
			  int lockfs, int flush_required)
{
	int r;
	struct dev_manager *dm;

	/*
	 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
	 * device directly, we don't want to touch the devices that use it.
	 */
	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
781
782 /*
783 * These two functions return the number of visible LVs in the state,
784 * or -1 on error. FIXME Check this.
785 */
786 int lvs_in_vg_activated(struct volume_group *vg)
787 {
788 struct lv_list *lvl;
789 int count = 0;
790
791 if (!activation())
792 return 0;
793
794 dm_list_iterate_items(lvl, &vg->lvs)
795 if (lv_is_visible(lvl->lv))
796 count += (_lv_active(vg->cmd, lvl->lv) == 1);
797
798 log_debug("Counted %d active LVs in VG %s", count, vg->name);
799
800 return count;
801 }
802
803 int lvs_in_vg_opened(const struct volume_group *vg)
804 {
805 const struct lv_list *lvl;
806 int count = 0;
807
808 if (!activation())
809 return 0;
810
811 dm_list_iterate_items(lvl, &vg->lvs)
812 if (lv_is_visible(lvl->lv))
813 count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
814
815 log_debug("Counted %d open LVs in VG %s", count, vg->name);
816
817 return count;
818 }
819
/*
 * _lv_is_active
 * @lv: logical volume being queried
 * @locally: set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	    0		not active
 *	   1	   0	    0		active remotely
 *	   1	   0	    1		exclusive remotely
 *	   1	   1	    0		active locally and possibly remotely
 *	   1	   1	    1		exclusive locally (or local && !cluster)
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	/* Is the dm device present on this node? */
	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		e = 1;  /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	/* Ask the cluster lock manager about remote/exclusive state */
	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	if (l) {
		if (exclusive)
			log_error("Unable to determine exclusivity of %s",
				  lv->name);
		goto out;
	}

	/* FIXME: Is this fallback alright? */
	if (activate_lv_excl(lv->vg->cmd, lv)) {
		if (!deactivate_lv(lv->vg->cmd, lv))
			stack;
		/* FIXME: locally & exclusive are undefined. */
		return 0;
	}
	/* FIXME: Check exclusive value here. */
out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}
901
/* Return 1 if the LV is active anywhere (locally or remotely). */
int lv_is_active(struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}

/* Return 1 if the LV is active somewhere but not on this node. */
int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	int l;
	return _lv_is_active(lv, &l, NULL) && !l;
}

/* Return 1 if the LV is active exclusively (on whichever node). */
int lv_is_active_exclusive(struct logical_volume *lv)
{
	int e;

	return _lv_is_active(lv, NULL, &e) && e;
}

/* Return 1 if the LV is active exclusively on this node. */
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}

/* Return 1 if the LV is active exclusively on a remote node. */
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}
933
934 #ifdef DMEVENTD
/*
 * Allocate and initialize a dmeventd handler for device "dmuuid" to be
 * monitored by library "dso".  The dm_event_handler_set_* calls return
 * non-zero on failure.  Returns NULL on any setup failure.
 */
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							 const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	/* Allow config to override which dmeventd executable is contacted */
	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);
	return NULL;
}
961
/*
 * Resolve the full path of a dmeventd monitoring shared library.
 * The result is allocated from cmd->mem; returns NULL on allocation
 * failure.
 */
char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
	char *path;

	if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
		log_error("Failed to allocate dmeventd library path.");
		return NULL;
	}

	/* NOTE(review): result of get_shared_library_path() is not checked
	 * here - confirm it cannot leave "path" unset. */
	get_shared_library_path(cmd, libpath, path, PATH_MAX);

	return path;
}
975
976 static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
977 {
978 const char *layer;
979
980 if (lv_is_thin_pool(lv))
981 layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
982 else if (lv_is_origin(lv))
983 layer = "real"; /* Monitor "real" for "snapshot-origin". */
984 else
985 layer = NULL;
986
987 return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
988 }
989
/*
 * Ask dmeventd whether monitoring library "dso" is registered for the
 * device belonging to "lv".  Returns the registered event mask (0 if
 * not registered or on error); *pending is set to 1 when a
 * (un)registration is still in progress.
 */
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;
	*pending = 0;

	if (!dso)
		return_0;

	/* Query the layered ("tpool"/"real") device where applicable */
	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	/* Report the pending flag separately, not as part of the mask */
	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}
1022
/*
 * Register (set != 0) or unregister (set == 0) dmeventd monitoring of
 * "lv" with library "dso".  A non-zero timeout additionally enables
 * periodic DM_EVENT_TIMEOUT events.  Returns 1 on success, 0 on failure.
 */
int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}
1052
1053 #endif
1054
/*
 * Start or stop dmeventd monitoring of "lv" and every related LV
 * (snapshots of an origin, a mirrored mirror log, and LVs stacked
 * beneath its segments).
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };

	/* Callers may pass NULL for default options */
	if (!laopts)
		laopts = &zlaopts;

	/* skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		/* Skip segments that are unmonitorable or part of a pvmove */
		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue;  /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		/* Pick the registration or unregistration function, if a
		 * state change is actually needed */
		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		/* Overall success requires the final state to match the request */
		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}
1199
/* Context handed to _preload_detached_lv() while iterating over LVs. */
struct detached_lv_data {
	struct logical_volume *lv_pre;	/* VG view from precommitted metadata */
	struct lv_activate_opts *laopts;
	int *flush_required;
};

/*
 * Callback: preload the precommitted table of "lv" if it is visible,
 * currently active and still present in the precommitted metadata.
 * Returns 1 on success (or nothing to do), 0 on preload failure.
 */
static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
{
	struct detached_lv_data *detached = data;
	struct lv_list *lvl_pre;

	if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
		if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) && (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
		    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
			return_0;
	}

	return 1;
}
1219
/*
 * Suspend the LV identified by lvid_s, first preloading the tables
 * from the precommitted ("new") metadata so a later resume can switch
 * to them.
 *
 * Returns 1 on success.  When error_if_not_suspended is 0, an LV that
 * does not exist or is already suspended also counts as success.
 * NOTE(review): callers appear to rely on the critical_section_inc/dec
 * calls staying balanced across suspend/resume - confirm before reordering.
 */
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;
	struct detached_lv_data detached;

	if (!activation())
		return 1;

	/* Committed (old) metadata */
	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_origin(lv) || !lv_is_origin(lv_pre))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			/* Keep the suspend counter balanced for the matching resume */
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * Preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if (!(lv_pre->status & LOCKED) &&
	    (lv->status & LOCKED) &&
	    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
		/* Preload all the LVs above the PVMOVE LV */
		dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
				log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		}
		/* Now preload the PVMOVE LV itself */
		if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
			log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
			goto out;
		}
		if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
			goto_out;
	} else {
		if (!_lv_preload(lv_pre, laopts, &flush_required))
			/* FIXME Revert preloading */
			goto_out;

		/*
		 * Search for existing LVs that have become detached and preload them.
		 */
		detached.lv_pre = lv_pre;
		detached.laopts = laopts;
		detached.flush_required = &flush_required;

		if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
			goto_out;

		/*
		 * Preload any snapshots that are being removed.
		 */
		if (!laopts->origin_only && lv_is_origin(lv)) {
			dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
				if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
					log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
						  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
					goto out;
				}
				if (!lv_is_cow(lvl_pre->lv) &&
				    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}
		}
	}

	/* Stop event monitoring before the device is suspended */
	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	/* lockfs: presumably requests an fs sync/freeze for origins and
	 * snapshots before suspending - confirm against _lv_suspend_lv() */
	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		release_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1375
1376 /* Returns success if the device is not active */
1377 int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
1378 {
1379 struct lv_activate_opts laopts = { .origin_only = origin_only };
1380
1381 return _lv_suspend(cmd, lvid_s, &laopts, 0);
1382 }
1383
1384 /* No longer used */
1385 /***********
1386 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
1387 {
1388 return _lv_suspend(cmd, lvid_s, 1);
1389 }
1390 ***********/
1391
1392 /*
1393 * _lv_resume
1394 * @cmd
1395 * @lvid_s
1396 * @origin_only
1397 * @exclusive: This parameter only has an affect in cluster-context.
1398 * It forces local target type to be used (instead of
1399 * cluster-aware type).
1400 * @error_if_not_active
1401 */
1402 static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
1403 struct lv_activate_opts *laopts, int error_if_not_active)
1404 {
1405 struct logical_volume *lv;
1406 struct lvinfo info;
1407 int r = 0;
1408
1409 if (!activation())
1410 return 1;
1411
1412 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1413 goto_out;
1414
1415 if (!lv_is_origin(lv))
1416 laopts->origin_only = 0;
1417
1418 if (test_mode()) {
1419 _skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
1420 laopts->revert ? " (reverting)" : "");
1421 r = 1;
1422 goto out;
1423 }
1424
1425 log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
1426 error_if_not_active ? "" : " if active",
1427 laopts->origin_only ? " without snapshots" : "",
1428 laopts->revert ? " (reverting)" : "");
1429
1430 if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
1431 goto_out;
1432
1433 if (!info.exists || !info.suspended) {
1434 if (error_if_not_active)
1435 goto_out;
1436 r = 1;
1437 if (!info.suspended)
1438 critical_section_dec(cmd, "already resumed");
1439 goto out;
1440 }
1441
1442 if (!_lv_activate_lv(lv, laopts))
1443 goto_out;
1444
1445 critical_section_dec(cmd, "resumed");
1446
1447 if (!monitor_dev_for_events(cmd, lv, laopts, 1))
1448 stack;
1449
1450 r = 1;
1451 out:
1452 if (lv)
1453 release_vg(lv->vg);
1454
1455 return r;
1456 }
1457
1458 /* Returns success if the device is not active */
1459 int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
1460 unsigned origin_only, unsigned exclusive, unsigned revert)
1461 {
1462 struct lv_activate_opts laopts = {
1463 .origin_only = origin_only,
1464 /*
1465 * When targets are activated exclusively in a cluster, the
1466 * non-clustered target should be used. This only happens
1467 * if exclusive is set.
1468 */
1469 .exclusive = exclusive,
1470 .revert = revert
1471 };
1472
1473 return _lv_resume(cmd, lvid_s, &laopts, 0);
1474 }
1475
1476 int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
1477 {
1478 struct lv_activate_opts laopts = { .origin_only = origin_only, };
1479
1480 return _lv_resume(cmd, lvid_s, &laopts, 1);
1481 }
1482
1483 static int _lv_has_open_snapshots(struct logical_volume *lv)
1484 {
1485 struct lv_segment *snap_seg;
1486 struct lvinfo info;
1487 int r = 0;
1488
1489 dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
1490 if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
1491 r = 1;
1492 continue;
1493 }
1494
1495 if (info.exists && info.open_count) {
1496 log_error("LV %s/%s has open snapshot %s: "
1497 "not deactivating", lv->vg->name, lv->name,
1498 snap_seg->cow->name);
1499 r = 1;
1500 }
1501 }
1502
1503 return r;
1504 }
1505
/*
 * Deactivate (remove the mapped device for) the LV identified by lvid_s.
 * Refuses when a visible LV is still in use or an origin has open
 * snapshots.  Returns 1 on success; an LV that is not active also
 * counts as success.
 */
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	/* Already inactive - nothing to do */
	if (!info.exists) {
		r = 1;
		goto out;
	}

	if (lv_is_visible(lv)) {
		if (!lv_check_not_in_use(cmd, lv, &info))
			goto_out;

		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/* Stop event monitoring before tearing the device down */
	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	/* Verify the device really went away */
	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1564
1565 /* Test if LV passes filter */
1566 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
1567 int *activate_lv)
1568 {
1569 struct logical_volume *lv;
1570 int r = 0;
1571
1572 if (!activation()) {
1573 *activate_lv = 1;
1574 return 1;
1575 }
1576
1577 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1578 goto out;
1579
1580 if (!_passes_activation_filter(cmd, lv)) {
1581 log_verbose("Not activating %s/%s since it does not pass "
1582 "activation filter.", lv->vg->name, lv->name);
1583 *activate_lv = 0;
1584 } else
1585 *activate_lv = 1;
1586 r = 1;
1587 out:
1588 if (lv)
1589 release_vg(lv->vg);
1590
1591 return r;
1592 }
1593
/*
 * Activate the LV identified by lvid_s: load its table and resume it.
 * With filter set, the LV must first pass the activation filter.
 * Refuses partial LVs (unless --partial) and LVs with unknown segment
 * types.  Returns 1 on success; an LV that is already fully active
 * also counts as success.
 */
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Activating %s/%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	/* Already active with a live table and not suspended - nothing to do */
	if (info.exists && !info.suspended && info.live_table) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	/* Start event monitoring only after successful activation */
	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1663
1664 /* Activate LV */
1665 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1666 {
1667 struct lv_activate_opts laopts = { .exclusive = exclusive };
1668
1669 if (!_lv_activate(cmd, lvid_s, &laopts, 0))
1670 return_0;
1671
1672 return 1;
1673 }
1674
1675 /* Activate LV only if it passes filter */
1676 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1677 {
1678 struct lv_activate_opts laopts = { .exclusive = exclusive };
1679
1680 if (!_lv_activate(cmd, lvid_s, &laopts, 1))
1681 return_0;
1682
1683 return 1;
1684 }
1685
1686 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
1687 {
1688 int r = 1;
1689
1690 if (!lv) {
1691 r = dm_mknodes(NULL);
1692 fs_unlock();
1693 return r;
1694 }
1695
1696 if (!activation())
1697 return 1;
1698
1699 r = dev_manager_mknodes(lv);
1700
1701 fs_unlock();
1702
1703 return r;
1704 }
1705
1706 /*
1707 * Does PV use VG somewhere in its construction?
1708 * Returns 1 on failure.
1709 */
1710 int pv_uses_vg(struct physical_volume *pv,
1711 struct volume_group *vg)
1712 {
1713 if (!activation())
1714 return 0;
1715
1716 if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
1717 return 0;
1718
1719 return dev_manager_device_uses_vg(pv->dev, vg);
1720 }
1721
/* Release resources held by the dev_manager layer (e.g. between operations). */
void activation_release(void)
{
	dev_manager_release();
}
1726
/* Final dev_manager cleanup at program exit. */
void activation_exit(void)
{
	dev_manager_exit();
}
1731 #endif
This page took 0.112538 seconds and 6 git commands to generate.