lib/activate/activate.c (lvm2.git, commit "Thin pool activation change")
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "metadata.h"
#include "activate.h"
#include "memlock.h"
#include "display.h"
#include "fs.h"
#include "lvm-exec.h"
#include "lvm-file.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "str_list.h"
#include "config.h"
#include "filter.h"
#include "segtype.h"
#include "sharedlib.h"

#include <limits.h>
#include <fcntl.h>
#include <unistd.h>

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)

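/*
 * Quick presence test for the old LVM1 kernel driver: the driver
 * exposes <proc_dir>/lvm/global (normally /proc/lvm/global), so the
 * existence of that file is used as the detection heuristic.
 */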
int lvm1_present(struct cmd_context *cmd)
{
	char path[PATH_MAX];

	if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
	    < 0) {
		log_error("LVM1 proc global snprintf failed");
		return 0;
	}

	if (path_exists(path))
		return 1;
	else
		return 0;
}

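/*
 * Collect the names of the kernel modules (e.g. dm-mirror, dm-snapshot)
 * needed to activate a segment, recursing through snapshot COWs and any
 * stacked AREA_LV sub-LVs so the full dependency set lands in 'modules'.
 */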
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;
		}
	}

	return 1;
}

int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules)
{
	struct lv_segment *seg;

	dm_list_iterate_items(seg, &lv->segments)
		if (!list_segment_modules(mem, seg, modules))
			return_0;

	return 1;
}

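/*
 * Built without libdevmapper, every activation entry point below is a
 * stub: broadly, query functions report failure or inactivity (0)
 * while state-changing functions report success (1), so callers can
 * proceed without a device-mapper driver.
 */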
#ifndef DEVMAPPER_SUPPORT
void set_activation(int act)
{
	static int warned = 0;

	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive, unsigned revert)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int lv_send_message(const struct logical_volume *lv, const char *message)
{
	return 0;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

int lv_is_active(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
#else /* DEVMAPPER_SUPPORT */

static int _activation = 1;

void set_activation(int act)
{
	if (act == _activation)
		return;

	_activation = act;
	if (_activation)
		log_verbose("Activation enabled. Device-mapper kernel "
			    "driver will be used.");
	else
		log_warn("WARNING: Activation disabled. No device-mapper "
			 "interaction will be attempted.");
}

int activation(void)
{
	return _activation;
}

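/*
 * Evaluate activation/volume_list from the configuration.  An
 * illustrative lvm.conf fragment covering the cases handled below
 * (plain VG name, "vg/lv" pair, "@tag", and the special "@*" host-tag
 * match):
 *
 *	activation {
 *		volume_list = [ "vg00", "vg00/lvol1", "@backup", "@*" ]
 *	}
 *
 * With no volume_list defined, only host tags are consulted.
 */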
static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct dm_config_node *cn;
	const struct dm_config_value *cv;
	const char *str;
	char path[PATH_MAX];

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined, checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}

	log_verbose("activation/volume_list configuration setting "
		    "defined, checking the list to match %s/%s",
		    lv->vg->name, lv->name);

	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in config file "
				  "activation/volume_list");
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file "
				  "activation/volume_list");
			continue;
		}

		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "activation/volume_list");
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in activation/volume_list configuration "
		    "setting matches %s/%s", lv->vg->name, lv->name);

	return 0;
}

int library_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	return dm_get_library_version(version, size);
}

int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}

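/*
 * Look up the version triple of a single target type by walking the
 * list returned by the DM_DEVICE_LIST_VERSIONS ioctl.  The entries are
 * variable-length, so the loop advances by each entry's 'next' byte
 * offset until an entry points back at itself (last_target == target).
 */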
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_versions *target, *last_target;

	log_very_verbose("Getting target version for %s", target_name);
	if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
		return_0;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (!dm_task_run(dmt)) {
		log_debug("Failed to get %s target version", target_name);
		/* Assume this was because LIST_VERSIONS isn't supported */
		/* Don't return directly here: the dm_task must be destroyed */
		r = 1;
		goto out;
	}

	target = dm_task_get_versions(dmt);

	do {
		last_target = target;

		if (!strcmp(target_name, target->name)) {
			r = 1;
			*maj = target->version[0];
			*min = target->version[1];
			*patchlevel = target->version[2];
			goto out;
		}

		target = (struct dm_versions *)((char *) target + target->next);
	} while (last_target != target);

      out:
	dm_task_destroy(dmt);

	return r;
}

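/*
 * Device-mapper target modules follow the dm-<target> naming scheme,
 * so loading e.g. the "mirror" target amounts to (a sketch, assuming
 * MODPROBE_CMD was configured as /sbin/modprobe):
 *
 *	/sbin/modprobe dm-mirror
 */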
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}

int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}

/*
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;

	if (!activation())
		return 0;
	/*
	 * If open_count info is requested, we have to be sure our own udev
	 * transactions are finished.
	 * For non-clustered locking types we are only interested in
	 * non-delete operations in progress - only those could lead to
	 * opened files.
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non-clustered - wait if there are non-delete ops */
	}

	if (!dev_manager_info(lv->vg->cmd->mem, lv, origin_only ? "real" : NULL, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}

int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	int r;
	struct logical_volume *lv;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		return 0;

	if (!lv_is_origin(lv))
		origin_only = 0;

	r = lv_info(cmd, lv, origin_only, info, with_open_count, with_read_ahead);
	release_vg(lv->vg);

	return r;
}

int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
			struct logical_volume *lv, struct lvinfo *info)
{
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Logical volume %s/%s in use.",
				  lv->vg->name, lv->name);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Logical volume %s/%s is used by another device.",
			  lv->vg->name, lv->name);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Logical volume %s/%s contains a filesystem in use.",
			  lv->vg->name, lv->name);
		return 0;
	}

	return 1;
}

/*
 * Returns 1 on success, else 0 on failure.
 */
int lv_check_transient(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_transient(dm, lv)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/* FIXME Merge with snapshot_percent */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If a mirrored LV is temporarily shrunk to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}

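/*
 * Small lv_info() wrappers used by the activation-state queries below;
 * both return -1 when the info call itself fails, so callers can tell
 * "query failed" apart from "inactive" or "not open".
 */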
static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
		stack;
		return -1;
	}

	return info.exists;
}

static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
		stack;
		return -1;
	}

	return info.open_count;
}

static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_activate(dm, lv, laopts)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
		       int *flush_required)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_deactivate(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_deactivate(dm, lv)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
			  int lockfs, int flush_required)
{
	int r;
	struct dev_manager *dm;

	/*
	 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
	 * device directly, we don't want to touch the devices that use it.
	 */
	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

/*
 * These two functions return the number of visible LVs in the given state,
 * or -1 on error.  FIXME Check this.
 */
int lvs_in_vg_activated(struct volume_group *vg)
{
	struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv))
			count += (_lv_active(vg->cmd, lvl->lv) == 1);

	log_debug("Counted %d active LVs in VG %s", count, vg->name);

	return count;
}

int lvs_in_vg_opened(const struct volume_group *vg)
{
	const struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv) && !lv_is_used_thin_pool(lvl->lv))
			count += (_lv_open_count(vg->cmd, lvl->lv) > 0);

	log_debug("Counted %d open LVs in VG %s", count, vg->name);

	return count;
}

/*
 * _lv_is_active
 * @lv:        logical volume being queried
 * @locally:   set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code, which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	     0		not active
 *	   1	   0	     0		active remotely
 *	   1	   0	     1		exclusive remotely
 *	   1	   1	     0		active locally and possibly remotely
 *	   1	   1	     1		exclusive locally (or local && !cluster)
 *
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		e = 1; /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	if (l) {
		if (exclusive)
			log_error("Unable to determine exclusivity of %s",
				  lv->name);
		goto out;
	}

	/* FIXME: Is this fallback alright? */
	if (activate_lv_excl(lv->vg->cmd, lv)) {
		if (!deactivate_lv(lv->vg->cmd, lv))
			stack;
		/* FIXME: locally & exclusive are undefined. */
		return 0;
	}
	/* FIXME: Check exclusive value here. */
out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}

int lv_is_active(struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}

int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	int l;

	return _lv_is_active(lv, &l, NULL) && !l;
}

int lv_is_active_exclusive(struct logical_volume *lv)
{
	int e;

	return _lv_is_active(lv, NULL, &e) && e;
}

int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}

int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}

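/*
 * dmeventd integration: build an event handler bound to a DM uuid and a
 * monitoring DSO, then register or unregister it with the daemon.  A
 * timeout of 0 requests no periodic DM_EVENT_TIMEOUT events, only error
 * events (see target_register_events() below).
 */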
#ifdef DMEVENTD
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							  const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);

	return NULL;
}

char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
	char *path;

	if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
		log_error("Failed to allocate dmeventd library path.");
		return NULL;
	}

	get_shared_library_path(cmd, libpath, path, PATH_MAX);

	return path;
}

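/*
 * Query dmeventd for an existing registration of this LV.  The return
 * value is the registered event mask (0 when unregistered); a pending
 * (un)registration is reported separately via *pending.  The DSO path
 * is typically obtained via get_monitor_dso_path() above from the
 * segment type's configured monitoring library (an assumption about
 * the callers, not something enforced here).
 */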
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	*pending = 0;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}

int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}

#endif

/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };

	if (!laopts)
		laopts = &zlaopts;

	/* Skip the dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd is configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In the case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue; /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple of times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}

struct detached_lv_data {
	struct logical_volume *lv_pre;
	struct lv_activate_opts *laopts;
	int *flush_required;
};

static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
{
	struct detached_lv_data *detached = data;
	struct lv_list *lvl_pre;

	if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
		if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) && (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
		    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
			return_0;
	}

	return 1;
}

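/*
 * Core of the suspend half of LVM2's suspend/commit/resume protocol:
 * the new (precommitted) table for each affected LV is preloaded as
 * the inactive table first, then the live device is suspended; the
 * later lv_resume() swaps the preloaded table in.  A rough sketch of
 * a caller's sequence (illustrative only, not a real call site):
 *
 *	if (!lv_suspend_if_active(cmd, lvid, 0))
 *		return 0;
 *	if (!vg_commit(vg))
 *		... revert and resume ...
 *	if (!lv_resume_if_active(cmd, lvid, 0, 0, 0))
 *		return 0;
 */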
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;
	struct detached_lv_data detached;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_origin(lv) || !lv_is_origin(lv_pre))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * Preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if (!(lv_pre->status & LOCKED) &&
	    (lv->status & LOCKED) &&
	    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
		/* Preload all the LVs above the PVMOVE LV */
		dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
				log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		}
		/* Now preload the PVMOVE LV itself */
		if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
			log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
			goto out;
		}
		if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
			goto_out;
	} else {
		if (!_lv_preload(lv_pre, laopts, &flush_required))
			/* FIXME Revert preloading */
			goto_out;

		/*
		 * Search for existing LVs that have become detached and preload them.
		 */
		detached.lv_pre = lv_pre;
		detached.laopts = laopts;
		detached.flush_required = &flush_required;

		if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
			goto_out;

		/*
		 * Preload any snapshots that are being removed.
		 */
		if (!laopts->origin_only && lv_is_origin(lv)) {
			dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
				if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
					log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
						  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
					goto out;
				}
				if (!lv_is_cow(lvl_pre->lv) &&
				    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}
		}
	}

	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		release_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Returns success if the device is not active */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	struct lv_activate_opts laopts = { .origin_only = origin_only };

	return _lv_suspend(cmd, lvid_s, &laopts, 0);
}

/* No longer used */
/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return _lv_suspend(cmd, lvid_s, 1);
}
***********/

/*
 * _lv_resume
 * @cmd
 * @lvid_s
 * @origin_only
 * @exclusive: This parameter only has an effect in cluster context.
 *             It forces the local target type to be used (instead of
 *             the cluster-aware type).
 * @error_if_not_active
 */
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
		      struct lv_activate_opts *laopts, int error_if_not_active)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	if (!lv_is_origin(lv))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
		      laopts->revert ? " (reverting)" : "");
		r = 1;
		goto out;
	}

	log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
		  error_if_not_active ? "" : " if active",
		  laopts->origin_only ? " without snapshots" : "",
		  laopts->revert ? " (reverting)" : "");

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || !info.suspended) {
		if (error_if_not_active)
			goto_out;
		r = 1;
		if (!info.suspended)
			critical_section_dec(cmd, "already resumed");
		goto out;
	}

	if (!_lv_activate_lv(lv, laopts))
		goto_out;

	critical_section_dec(cmd, "resumed");

	if (!monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

	r = 1;
out:
	if (lv)
		release_vg(lv->vg);

	return r;
}

/* Returns success if the device is not active */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive, unsigned revert)
{
	struct lv_activate_opts laopts = {
		.origin_only = origin_only,
		/*
		 * When targets are activated exclusively in a cluster, the
		 * non-clustered target should be used.  This only happens
		 * if exclusive is set.
		 */
		.exclusive = exclusive,
		.revert = revert
	};

	return _lv_resume(cmd, lvid_s, &laopts, 0);
}

int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	struct lv_activate_opts laopts = { .origin_only = origin_only, };

	return _lv_resume(cmd, lvid_s, &laopts, 1);
}

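/*
 * Deactivation guard: an origin must not be torn down while any of its
 * snapshot COW devices is open, so this scans the origin's snapshot
 * segments and reports (and logs) any open ones.  A failed lv_info()
 * query is treated as "open" to err on the safe side.
 */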
static int _lv_has_open_snapshots(struct logical_volume *lv)
{
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0;

	dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
		if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
			r = 1;
			continue;
		}

		if (info.exists && info.open_count) {
			log_error("LV %s/%s has open snapshot %s: "
				  "not deactivating", lv->vg->name, lv->name,
				  snap_seg->cow->name);
			r = 1;
		}
	}

	return r;
}

int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	if (!info.exists) {
		r = 1;
		goto out;
	}

	if (lv_is_visible(lv)) {
		if (!lv_check_not_in_use(cmd, lv, &info))
			goto_out;

		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	struct logical_volume *lv;
	int r = 0;

	if (!activation()) {
		*activate_lv = 1;
		return 1;
	}

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (!_passes_activation_filter(cmd, lv)) {
		log_verbose("Not activating %s/%s since it does not pass "
			    "activation filter.", lv->vg->name, lv->name);
		*activate_lv = 0;
	} else
		*activate_lv = 1;
	r = 1;
out:
	if (lv)
		release_vg(lv->vg);

	return r;
}

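/*
 * Activation proper: after the optional volume_list filter and the
 * partial-LV/unknown-segment safety checks, the device tree is built
 * or reloaded inside a critical section and event monitoring is then
 * (re)enabled for the activated LV.
 */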
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Activating %s/%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	if (info.exists && !info.suspended && info.live_table) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Activate LV */
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 0))
		return_0;

	return 1;
}

/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 1))
		return_0;

	return 1;
}

int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	int r = 1;

	if (!lv) {
		r = dm_mknodes(NULL);
		fs_unlock();
		return r;
	}

	if (!activation())
		return 1;

	r = dev_manager_mknodes(lv);

	fs_unlock();

	return r;
}

/*
 * Does PV use VG somewhere in its construction?
 * Returns 1 on failure.
 */
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	if (!activation())
		return 0;

	if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
		return 0;

	return dev_manager_device_uses_vg(pv->dev, vg);
}

void activation_release(void)
{
	dev_manager_release();
}

void activation_exit(void)
{
	dev_manager_exit();
}
#endif