/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "metadata.h"
#include "activate.h"
#include "memlock.h"
#include "display.h"
#include "fs.h"
#include "lvm-exec.h"
#include "lvm-file.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "str_list.h"
#include "config.h"
#include "filter.h"
#include "segtype.h"
#include "sharedlib.h"

#include <limits.h>
#include <fcntl.h>
#include <unistd.h>

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)

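/*
 * Returns 1 if the LVM1 kernel driver appears to be present
 * (i.e. <proc_dir>/lvm/global exists), 0 otherwise.
 */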
int lvm1_present(struct cmd_context *cmd)
{
        static char path[PATH_MAX];

        if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
            < 0) {
                log_error("LVM1 proc global snprintf failed");
                return 0;
        }

        if (path_exists(path))
                return 1;
        else
                return 0;
}

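/*
 * Append the names of any kernel modules needed to activate the given
 * segment (and any stacked sub-LVs and snapshots it references) to
 * 'modules'.  Returns 1 on success, 0 on allocation failure.
 */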
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
                         struct dm_list *modules)
{
        unsigned int s;
        struct lv_segment *seg2, *snap_seg;
        struct dm_list *snh;

        if (seg->segtype->ops->modules_needed &&
            !seg->segtype->ops->modules_needed(mem, seg, modules)) {
                log_error("module string allocation failed");
                return 0;
        }

        if (lv_is_origin(seg->lv))
                dm_list_iterate(snh, &seg->lv->snapshot_segs)
                        if (!list_lv_modules(mem,
                                             dm_list_struct_base(snh,
                                                                 struct lv_segment,
                                                                 origin_list)->cow,
                                             modules))
                                return_0;

        if (lv_is_cow(seg->lv)) {
                snap_seg = find_cow(seg->lv);
                if (snap_seg->segtype->ops->modules_needed &&
                    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
                                                            modules)) {
                        log_error("snap_seg module string allocation failed");
                        return 0;
                }
        }

        for (s = 0; s < seg->area_count; s++) {
                switch (seg_type(seg, s)) {
                case AREA_LV:
                        seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
                        if (seg2 && !list_segment_modules(mem, seg2, modules))
                                return_0;
                        break;
                case AREA_PV:
                case AREA_UNASSIGNED:
                        ;
                }
        }

        return 1;
}

int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
                    struct dm_list *modules)
{
        struct lv_segment *seg;

        dm_list_iterate_items(seg, &lv->segments)
                if (!list_segment_modules(mem, seg, modules))
                        return_0;

        return 1;
}
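
/*
 * Example use (a sketch, not taken from this file): collect the module
 * names an LV needs before activation, e.g. to modprobe them up front:
 *
 *   struct dm_list modules;
 *   dm_list_init(&modules);
 *   if (!list_lv_modules(mem, lv, &modules))
 *           stack;
 */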

#ifndef DEVMAPPER_SUPPORT
void set_activation(int act)
{
        static int warned = 0;

        if (warned || !act)
                return;

        log_error("Compiled without libdevmapper support. "
                  "Can't enable activation.");

        warned = 1;
}
int activation(void)
{
        return 0;
}
int library_version(char *version, size_t size)
{
        return 0;
}
int driver_version(char *version, size_t size)
{
        return 0;
}
int target_version(const char *target_name, uint32_t *maj,
                   uint32_t *min, uint32_t *patchlevel)
{
        return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
                   int use_modprobe)
{
        return 0;
}
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
        return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
            struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
                    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        return 0;
}
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
                        struct logical_volume *lv, struct lvinfo *info)
{
        return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
        return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
                      int wait, percent_t *percent, uint32_t *event_nr)
{
        return 0;
}
int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
        return 0;
}
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
                         percent_t *percent)
{
        return 0;
}
int lv_thin_percent(const struct logical_volume *lv, int mapped,
                    percent_t *percent)
{
        return 0;
}
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
                                uint64_t *transaction_id)
{
        return 0;
}
int lvs_in_vg_activated(const struct volume_group *vg)
{
        return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
        return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
        return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
        return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
        return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
                        unsigned origin_only, unsigned exclusive, unsigned revert)
{
        return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
        return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
                         int *activate_lv)
{
        return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
        return 1;
}
int pv_uses_vg(struct physical_volume *pv,
               struct volume_group *vg)
{
        return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

int lv_is_active(const struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_but_not_locally(const struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_exclusive(const struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
        return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
        return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
                           const struct lv_activate_opts *laopts, int monitor)
{
        return 1;
}
/* fs.c */
void fs_unlock(void)
{
}
/* dev_manager.c */
#include "targets.h"
int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
                   struct dm_tree_node *node, uint32_t start_area,
                   uint32_t areas)
{
        return 0;
}
int device_is_usable(struct device *dev)
{
        return 0;
}
int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
                       const char *layer, const char *target_type)
{
        return 0;
}
#else /* DEVMAPPER_SUPPORT */

static int _activation = 1;

void set_activation(int act)
{
        if (act == _activation)
                return;

        _activation = act;
        if (_activation)
                log_verbose("Activation enabled. Device-mapper kernel "
                            "driver will be used.");
        else
                log_warn("WARNING: Activation disabled. No device-mapper "
                         "interaction will be attempted.");
}

int activation(void)
{
        return _activation;
}

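/*
 * Check whether an LV matches one entry of a "volume list" configuration
 * setting.  A sketch of the lvm.conf syntax these lists use (see
 * lvm.conf(5); the values here are illustrative only):
 *
 *   activation {
 *       volume_list = [ "vg0", "vg1/lvol1", "@tag1", "@*" ]
 *   }
 *
 * "@tag" matches LVs or VGs carrying that tag; "@*" matches any host tag.
 */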
int lv_passes_volumes_filter(struct cmd_context *cmd, struct logical_volume *lv,
                             const struct dm_config_node *cn, const char *config_path)
{
        const struct dm_config_value *cv;
        const char *str;
        static char path[PATH_MAX];

        log_verbose("%s configuration setting defined: "
                    "Checking the list to match %s/%s",
                    config_path, lv->vg->name, lv->name);

        for (cv = cn->v; cv; cv = cv->next) {
                if (cv->type != DM_CFG_STRING) {
                        log_error("Ignoring invalid non-string entry in config file %s",
                                  config_path);
                        continue;
                }
                str = cv->v.str;
                if (!*str) {
                        log_error("Ignoring empty string in config file %s",
                                  config_path);
                        continue;
                }

                /* Tag? */
                if (*str == '@') {
                        str++;
                        if (!*str) {
                                log_error("Ignoring empty tag in config file "
                                          "%s", config_path);
                                continue;
                        }
                        /* If any host tag matches any LV or VG tag, activate */
                        if (!strcmp(str, "*")) {
                                if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
                                    || str_list_match_list(&cmd->tags,
                                                           &lv->vg->tags, NULL))
                                        return 1;
                                else
                                        continue;
                        }
                        /* If supplied tag matches LV or VG tag, activate */
                        if (str_list_match_item(&lv->tags, str) ||
                            str_list_match_item(&lv->vg->tags, str))
                                return 1;
                        else
                                continue;
                }
                if (!strchr(str, '/')) {
                        /* vgname supplied */
                        if (!strcmp(str, lv->vg->name))
                                return 1;
                        else
                                continue;
                }
                /* vgname/lvname */
                if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
                                lv->name) < 0) {
                        log_error("dm_snprintf error from %s/%s", lv->vg->name,
                                  lv->name);
                        continue;
                }
                if (!strcmp(path, str))
                        return 1;
        }

        log_verbose("No item supplied in %s configuration setting "
                    "matches %s/%s", config_path, lv->vg->name, lv->name);

        return 0;
}

static int _passes_activation_filter(struct cmd_context *cmd,
                                     struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
                log_verbose("activation/volume_list configuration setting "
                            "not defined: Checking only host tags for %s/%s",
                            lv->vg->name, lv->name);

                /* If no host tags defined, activate */
                if (dm_list_empty(&cmd->tags))
                        return 1;

                /* If any host tag matches any LV or VG tag, activate */
                if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
                    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
                        return 1;

                log_verbose("No host tag matches %s/%s",
                            lv->vg->name, lv->name);

                /* Don't activate */
                return 0;
        }

        return lv_passes_volumes_filter(cmd, lv, cn, "activation/volume_list");
}

static int _passes_readonly_filter(struct cmd_context *cmd,
                                   struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/read_only_volume_list")))
                return 0;

        return lv_passes_volumes_filter(cmd, lv, cn, "activation/read_only_volume_list");
}

int lv_passes_auto_activation_filter(struct cmd_context *cmd, struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/auto_activation_volume_list"))) {
                log_verbose("activation/auto_activation_volume_list configuration setting "
                            "not defined: All logical volumes will be auto-activated.");
                return 1;
        }

        return lv_passes_volumes_filter(cmd, lv, cn, "activation/auto_activation_volume_list");
}
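
/*
 * Example (a sketch; values illustrative only) of the lvm.conf entry the
 * auto-activation filter above consumes:
 *
 *   activation {
 *       auto_activation_volume_list = [ "vg0", "vg1/lvol1", "@tag1" ]
 *   }
 *
 * When the setting is absent, every LV passes, matching the behaviour
 * logged above.
 */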

int library_version(char *version, size_t size)
{
        if (!activation())
                return 0;

        return dm_get_library_version(version, size);
}

int driver_version(char *version, size_t size)
{
        if (!activation())
                return 0;

        log_very_verbose("Getting driver version");

        return dm_driver_version(version, size);
}

int target_version(const char *target_name, uint32_t *maj,
                   uint32_t *min, uint32_t *patchlevel)
{
        int r = 0;
        struct dm_task *dmt;
        struct dm_versions *target, *last_target;

        log_very_verbose("Getting target version for %s", target_name);
        if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
                return_0;

        if (activation_checks() && !dm_task_enable_checks(dmt))
                goto_out;

        if (!dm_task_run(dmt)) {
                log_debug("Failed to get %s target version", target_name);
                /* Assume this was because LIST_VERSIONS isn't supported */
                *maj = 0;
                *min = 0;
                *patchlevel = 0;
                r = 1;
                goto out;
        }

        target = dm_task_get_versions(dmt);

        do {
                last_target = target;

                if (!strcmp(target_name, target->name)) {
                        r = 1;
                        *maj = target->version[0];
                        *min = target->version[1];
                        *patchlevel = target->version[2];
                        goto out;
                }

                target = (struct dm_versions *)((char *) target + target->next);
        } while (last_target != target);

out:
        dm_task_destroy(dmt);

        return r;
}
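
/*
 * Example use (a sketch, not taken from this file): callers typically
 * gate optional behaviour on a minimum target version, e.g.
 *
 *   uint32_t maj, min, patchlevel;
 *   if (target_version("mirror", &maj, &min, &patchlevel) &&
 *       (maj > 1 || (maj == 1 && min >= 12)))
 *           ...use the newer kernel interface...
 *
 * The version numbers here are illustrative only.
 */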

int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
        struct dm_task *dmt;
        const char *uuid;
        int r;

        if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
                return_0;

        if (!dm_task_set_minor(dmt, minor) ||
            !dm_task_set_major(dmt, major) ||
            !dm_task_run(dmt) ||
            !(uuid = dm_task_get_uuid(dmt))) {
                dm_task_destroy(dmt);
                return 0;
        }

        r = strncasecmp(uuid, prefix, strlen(prefix));
        dm_task_destroy(dmt);

        return r ? 0 : 1;
}
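
/*
 * Typical use (a sketch): test whether a dm device was created by LVM by
 * checking its UUID prefix, e.g. lvm_dm_prefix_check(major, minor, "LVM-").
 * The comparison above is case-insensitive and matches the prefix only.
 */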

int module_present(struct cmd_context *cmd, const char *target_name)
{
        int ret = 0;
#ifdef MODPROBE_CMD
        char module[128];
        const char *argv[3];

        if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
                log_error("module_present module name too long: %s",
                          target_name);
                return 0;
        }

        argv[0] = MODPROBE_CMD;
        argv[1] = module;
        argv[2] = NULL;

        ret = exec_cmd(cmd, argv, NULL, 0);
#endif
        return ret;
}
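
/*
 * Example (a sketch): module_present(cmd, "snapshot") runs
 * "modprobe dm-snapshot" when lvm was built with MODPROBE_CMD defined;
 * otherwise it simply reports failure.
 */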

int target_present(struct cmd_context *cmd, const char *target_name,
                   int use_modprobe)
{
        uint32_t maj, min, patchlevel;

        if (!activation())
                return 0;

#ifdef MODPROBE_CMD
        if (use_modprobe) {
                if (target_version(target_name, &maj, &min, &patchlevel))
                        return 1;

                if (!module_present(cmd, target_name))
                        return_0;
        }
#endif

        return target_version(target_name, &maj, &min, &patchlevel);
}
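
/*
 * Example (a sketch): target_present(cmd, "snapshot", 1) first checks the
 * kernel's target list and, if the target is missing, tries to load
 * dm-snapshot via modprobe before checking again.
 */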

/*
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
            struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        struct dm_info dminfo;
        const char *layer;

        if (!activation())
                return 0;
        /*
         * If open_count info is requested, we have to be sure our own
         * udev transactions have finished.
         * With non-clustered locking we only care about non-delete
         * operations still in progress, as only those could leave
         * files open.
         */
        if (with_open_count) {
                if (locking_is_clustered())
                        sync_local_dev_names(cmd); /* Wait to have udev in sync */
                else if (fs_has_non_delete_ops())
                        fs_unlock(); /* For non clustered - wait if there are non-delete ops */
        }

        if (use_layer && lv_is_thin_pool(lv))
                layer = "tpool";
        else if (use_layer && lv_is_origin(lv))
                layer = "real";
        else
                layer = NULL;

        if (!dev_manager_info(lv->vg->cmd->mem, lv, layer, with_open_count,
                              with_read_ahead, &dminfo, &info->read_ahead))
                return_0;

        info->exists = dminfo.exists;
        info->suspended = dminfo.suspended;
        info->open_count = dminfo.open_count;
        info->major = dminfo.major;
        info->minor = dminfo.minor;
        info->read_only = dminfo.read_only;
        info->live_table = dminfo.live_table;
        info->inactive_table = dminfo.inactive_table;

        return 1;
}
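
/*
 * Typical caller pattern (a sketch, not taken from this file):
 *
 *   struct lvinfo info;
 *   if (lv_info(cmd, lv, 0, &info, 1, 0) && info.exists && !info.open_count)
 *           ...device exists and nothing holds it open...
 */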

int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
                    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        int r;
        struct logical_volume *lv;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                return 0;

        r = lv_info(cmd, lv, use_layer, info, with_open_count, with_read_ahead);
        release_vg(lv->vg);

        return r;
}

int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
                        struct logical_volume *lv, struct lvinfo *info)
{
        if (!info->exists)
                return 1;

        /* If sysfs is not used, use open_count information only. */
        if (!*dm_sysfs_dir()) {
                if (info->open_count) {
                        log_error("Logical volume %s/%s in use.",
                                  lv->vg->name, lv->name);
                        return 0;
                }

                return 1;
        }

        if (dm_device_has_holders(info->major, info->minor)) {
                log_error("Logical volume %s/%s is used by another device.",
                          lv->vg->name, lv->name);
                return 0;
        }

        if (dm_device_has_mounted_fs(info->major, info->minor)) {
                log_error("Logical volume %s/%s contains a filesystem in use.",
                          lv->vg->name, lv->name);
                return 0;
        }

        return 1;
}

/*
 * Returns 1 if transient status was checked, else 0 on failure.
 */
int lv_check_transient(struct logical_volume *lv)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_transient(dm, lv)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/* FIXME Merge with snapshot_percent */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
                      int wait, percent_t *percent, uint32_t *event_nr)
{
        int r;
        struct dev_manager *dm;
        struct lvinfo info;

        /* If a mirrored LV is temporarily shrunk to 1 area (= linear),
         * it should be considered in-sync. */
        if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
                *percent = PERCENT_100;
                return 1;
        }

        if (!activation())
                return 0;

        log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

        if (!lv_info(cmd, lv, 0, &info, 0, 0))
                return_0;

        if (!info.exists)
                return 0;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
        return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}

/*
 * Returns data or metadata percent usage, depending on the metadata
 * flag (0 = data, 1 = metadata).
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
                         percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking thin %sdata percent for LV %s/%s",
                  (metadata) ? "meta" : "", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_percent(const struct logical_volume *lv,
                    int mapped, percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking thin percent for LV %s/%s",
                  lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/*
 * Returns 1 if transaction_id set, else 0 on failure.
 */
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
                                uint64_t *transaction_id)
{
        int r;
        struct dev_manager *dm;
        struct dm_status_thin_pool *status;

        if (!activation())
                return 0;

        log_debug("Checking thin pool transaction id for LV %s/%s",
                  lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_pool_status(dm, lv, &status)))
                stack;
        else
                *transaction_id = status->transaction_id;

        dev_manager_destroy(dm);

        return r;
}

static int _lv_active(struct cmd_context *cmd, const struct logical_volume *lv)
{
        struct lvinfo info;

        if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
                stack;
                return -1;
        }

        return info.exists;
}

static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
        struct lvinfo info;

        if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
                stack;
                return -1;
        }

        return info.open_count;
}

static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
        int r;
        struct dev_manager *dm;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                return_0;

        if (!(r = dev_manager_activate(dm, lv, laopts)))
                stack;

        dev_manager_destroy(dm);
        return r;
}

static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
                       int *flush_required)
{
        int r = 0;
        struct dev_manager *dm;
        int old_readonly = laopts->read_only;

        laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                goto_out;

        if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
                stack;

        dev_manager_destroy(dm);

        laopts->read_only = old_readonly;
out:
        return r;
}

static int _lv_deactivate(struct logical_volume *lv)
{
        int r;
        struct dev_manager *dm;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_deactivate(dm, lv)))
                stack;

        dev_manager_destroy(dm);
        return r;
}

static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
                          int lockfs, int flush_required)
{
        int r;
        struct dev_manager *dm;

        laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

        /*
         * When we are asked to manipulate (normally suspend/resume) the PVMOVE
         * device directly, we don't want to touch the devices that use it.
         */
        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                return_0;

        if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
                stack;

        dev_manager_destroy(dm);
        return r;
}

/*
 * These two functions return the number of visible LVs in the given
 * state (activated or opened respectively), or -1 on error.
 * FIXME Check this.
 */
int lvs_in_vg_activated(const struct volume_group *vg)
{
        struct lv_list *lvl;
        int count = 0;

        if (!activation())
                return 0;

        dm_list_iterate_items(lvl, &vg->lvs)
                if (lv_is_visible(lvl->lv))
                        count += (_lv_active(vg->cmd, lvl->lv) == 1);

        log_debug("Counted %d active LVs in VG %s", count, vg->name);

        return count;
}

int lvs_in_vg_opened(const struct volume_group *vg)
{
        const struct lv_list *lvl;
        int count = 0;

        if (!activation())
                return 0;

        dm_list_iterate_items(lvl, &vg->lvs)
                if (lv_is_visible(lvl->lv))
                        count += (_lv_open_count(vg->cmd, lvl->lv) > 0);

        log_debug("Counted %d open LVs in VG %s", count, vg->name);

        return count;
}

/*
 * _lv_is_active
 * @lv:        logical volume being queried
 * @locally:   set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *
 *      return  locally  exclusively  status
 *      ======  =======  ===========  ======
 *         0       0          0       not active
 *         1       0          0       active remotely
 *         1       0          1       exclusive remotely
 *         1       1          0       active locally and possibly remotely
 *         1       1          1       exclusive locally (or local && !cluster)
 *
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(const struct logical_volume *lv,
                         int *locally, int *exclusive)
{
        int r, l, e; /* remote, local, and exclusive */

        r = l = e = 0;

        if (_lv_active(lv->vg->cmd, lv))
                l = 1;

        if (!vg_is_clustered(lv->vg)) {
                if (l)
                        e = 1; /* exclusive by definition */
                goto out;
        }

        /* Active locally, and the caller doesn't care about exclusive */
        if (l && !exclusive)
                goto out;

        if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
                goto out;

        /*
         * If lock query is not supported (due to interfacing with old
         * code), then we cannot evaluate exclusivity properly.
         *
         * Old users of this function will never be affected by this,
         * since they are only concerned about active vs. not active.
         * New users of this function who specifically ask for 'exclusive'
         * will be given an error message.
         */
        log_error("Unable to determine exclusivity of %s", lv->name);

        e = 0;

        /*
         * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
         * but it's unreliable.
         */

out:
        if (locally)
                *locally = l;
        if (exclusive)
                *exclusive = e;

        log_very_verbose("%s/%s is %sactive%s%s",
                         lv->vg->name, lv->name,
                         (r || l) ? "" : "not ",
                         (exclusive && e) ? " exclusive" : "",
                         e ? (l ? " locally" : " remotely") : "");

        return r || l;
}

int lv_is_active(const struct logical_volume *lv)
{
        return _lv_is_active(lv, NULL, NULL);
}

int lv_is_active_but_not_locally(const struct logical_volume *lv)
{
        int l;

        return _lv_is_active(lv, &l, NULL) && !l;
}

int lv_is_active_exclusive(const struct logical_volume *lv)
{
        int e;

        return _lv_is_active(lv, NULL, &e) && e;
}

int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
        int l, e;

        return _lv_is_active(lv, &l, &e) && l && e;
}

int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
        int l, e;

        return _lv_is_active(lv, &l, &e) && !l && e;
}
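
/*
 * Example (a sketch): clustered callers combine these wrappers, e.g.
 *
 *   if (vg_is_clustered(lv->vg) && lv_is_active_but_not_locally(lv))
 *           ...refuse an operation that needs the LV active here...
 */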

#ifdef DMEVENTD
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
                                                         const int timeout, enum dm_event_mask mask)
{
        struct dm_event_handler *dmevh;

        if (!(dmevh = dm_event_handler_create()))
                return_NULL;

        if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
                goto_bad;

        if (dm_event_handler_set_dso(dmevh, dso))
                goto_bad;

        if (dm_event_handler_set_uuid(dmevh, dmuuid))
                goto_bad;

        dm_event_handler_set_timeout(dmevh, timeout);
        dm_event_handler_set_event_mask(dmevh, mask);

        return dmevh;

bad:
        dm_event_handler_destroy(dmevh);
        return NULL;
}

char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
        char *path;

        if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
                log_error("Failed to allocate dmeventd library path.");
                return NULL;
        }

        get_shared_library_path(cmd, libpath, path, PATH_MAX);

        return path;
}

static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
{
        const char *layer;

        if (lv_is_thin_pool(lv))
                layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
        else if (lv_is_origin(lv))
                layer = "real"; /* Monitor "real" for "snapshot-origin". */
        else
                layer = NULL;

        return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
}

int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
                                    struct logical_volume *lv, int *pending)
{
        char *uuid;
        enum dm_event_mask evmask = 0;
        struct dm_event_handler *dmevh;

        *pending = 0;

        if (!dso)
                return_0;

        if (!(uuid = _build_target_uuid(cmd, lv)))
                return_0;

        if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
                return_0;

        if (dm_event_get_registered_device(dmevh, 0)) {
                dm_event_handler_destroy(dmevh);
                return 0;
        }

        evmask = dm_event_handler_get_event_mask(dmevh);
        if (evmask & DM_EVENT_REGISTRATION_PENDING) {
                *pending = 1;
                evmask &= ~DM_EVENT_REGISTRATION_PENDING;
        }

        dm_event_handler_destroy(dmevh);

        return evmask;
}

int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
                           int evmask __attribute__((unused)), int set, int timeout)
{
        char *uuid;
        struct dm_event_handler *dmevh;
        int r;

        if (!dso)
                return_0;

        /* We always monitor the "real" device, never the "snapshot-origin" itself. */
        if (!(uuid = _build_target_uuid(cmd, lv)))
                return_0;

        if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
                                               DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
                return_0;

        r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

        dm_event_handler_destroy(dmevh);

        if (!r)
                return_0;

        log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

        return 1;
}

#endif

/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
                           const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
        int i, pending = 0, monitored;
        int r = 1;
        struct dm_list *tmp, *snh, *snht;
        struct lv_segment *seg;
        struct lv_segment *log_seg;
        int (*monitor_fn) (struct lv_segment *s, int e);
        uint32_t s;
        static const struct lv_activate_opts zlaopts = { 0 };
        static const struct lv_activate_opts thinopts = { .skip_in_use = 1 };
        struct lvinfo info;

        if (!laopts)
                laopts = &zlaopts;

        /* Skip the dmeventd code altogether */
        if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
                return 1;

        /*
         * Nothing to do if dmeventd is configured not to be used.
         */
        if (monitor && !dmeventd_monitor_mode())
                return 1;

        /*
         * Allow unmonitoring of a thin pool either via an explicit pool
         * unmonitor or via unmonitoring before the last thin pool user
         * is deactivated.  Skip the unmonitor if it was invoked through
         * unmonitoring a thin volume and the pool still has another
         * user (open_count > 1).
         */
        if (laopts->skip_in_use && lv_info(lv->vg->cmd, lv, 1, &info, 1, 0) &&
            (info.open_count != 1)) {
                log_debug("Skipping unmonitor of opened %s (open:%d)",
                          lv->name, info.open_count);
                return 1;
        }

        /*
         * In case of a snapshot device, we monitor lv->snapshot->lv,
         * not the actual LV itself.
         */
        if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
                return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

        /*
         * In case this LV is a snapshot origin, we instead monitor
         * each of its respective snapshots.  The origin itself may
         * also need to be monitored if it is a mirror, for example.
         */
        if (!laopts->origin_only && lv_is_origin(lv))
                dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
                        if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
                                    struct lv_segment, origin_list)->cow, NULL, monitor))
                                r = 0;

        /*
         * If the volume is mirrored and its log is also mirrored, monitor
         * the log volume as well.
         */
        if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
            (log_seg = first_seg(seg->log_lv)) != NULL &&
            seg_is_mirrored(log_seg))
                if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
                        r = 0;

        dm_list_iterate(tmp, &lv->segments) {
                seg = dm_list_item(tmp, struct lv_segment);

                /* Recurse for AREA_LV */
                for (s = 0; s < seg->area_count; s++) {
                        if (seg_type(seg, s) != AREA_LV)
                                continue;
                        if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
                                                    monitor)) {
                                log_error("Failed to %smonitor %s",
                                          monitor ? "" : "un",
                                          seg_lv(seg, s)->name);
                                r = 0;
                        }
                }

                /*
                 * When unmonitoring a thin volume, ask the pool to check
                 * first whether it has any other users.
                 *
                 * FIXME: code here looks like _lv_postorder()
                 */
                if (seg->pool_lv &&
                    !monitor_dev_for_events(cmd, seg->pool_lv,
                                            (!monitor) ? &thinopts : NULL, monitor))
                        r = 0;

                if (seg->metadata_lv &&
                    !monitor_dev_for_events(cmd, seg->metadata_lv, NULL, monitor))
                        r = 0;

                if (!seg_monitored(seg) || (seg->status & PVMOVE))
                        continue;

                monitor_fn = NULL;

                /* Check monitoring status */
                if (seg->segtype->ops->target_monitored)
                        monitored = seg->segtype->ops->target_monitored(seg, &pending);
                else
                        continue; /* segtype doesn't support registration */

                /*
                 * FIXME: We should really try again if pending
                 */
                monitored = (pending) ? 0 : monitored;

                if (monitor) {
                        if (monitored)
                                log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
                        else if (seg->segtype->ops->target_monitor_events)
                                monitor_fn = seg->segtype->ops->target_monitor_events;
                } else {
                        if (!monitored)
                                log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
                        else if (seg->segtype->ops->target_unmonitor_events)
                                monitor_fn = seg->segtype->ops->target_unmonitor_events;
                }

                /* Do [un]monitor */
                if (!monitor_fn)
                        continue;

                log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
                            test_mode() ? " [Test mode: skipping this]" : "");

                /* FIXME Test mode should really continue a bit further. */
                if (test_mode())
                        continue;

                /* FIXME specify events */
                if (!monitor_fn(seg, 0)) {
                        log_error("%s/%s: %s segment monitoring function failed.",
                                  lv->vg->name, lv->name, seg->segtype->name);
                        return 0;
                }

                /* Check [un]monitor results */
                /* Try a couple of times if pending, but not forever... */
                for (i = 0; i < 10; i++) {
                        pending = 0;
                        monitored = seg->segtype->ops->target_monitored(seg, &pending);
                        if (pending ||
                            (!monitored && monitor) ||
                            (monitored && !monitor))
                                log_very_verbose("%s/%s %smonitoring still pending: waiting...",
                                                 lv->vg->name, lv->name, monitor ? "" : "un");
                        else
                                break;
                        sleep(1);
                }

                if (r)
                        r = (monitored && monitor) || (!monitored && !monitor);
        }

        return r;
#else
        return 1;
#endif
}

struct detached_lv_data {
        struct logical_volume *lv_pre;
        struct lv_activate_opts *laopts;
        int *flush_required;
};

static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
{
        struct detached_lv_data *detached = data;
        struct lv_list *lvl_pre;

        if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
                if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) &&
                    (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
                    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
                        return_0;
        }

        return 1;
}

static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
                       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
        struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
        struct lv_list *lvl_pre;
        struct seg_list *sl;
        struct lv_segment *snap_seg;
        struct lvinfo info;
        int r = 0, lockfs = 0, flush_required = 0;
        struct detached_lv_data detached;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto_out;

        /* Use precommitted metadata if present */
        if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
                goto_out;

        /* Ignore origin_only unless LV is origin in both old and new metadata */
        if (!lv_is_thin_volume(lv) && !(lv_is_origin(lv) && lv_is_origin(lv_pre)))
                laopts->origin_only = 0;

        if (test_mode()) {
                _skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
                r = 1;
                goto out;
        }

        if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
                goto_out;

        if (!info.exists || info.suspended) {
                if (!error_if_not_suspended) {
                        r = 1;
                        if (info.suspended)
                                critical_section_inc(cmd, "already suspended");
                }
                goto out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        /*
         * Preload devices for the LV.
         * If the PVMOVE LV is being removed, it's only present in the old
         * metadata and not the new, so we must explicitly add the new
         * tables for all the changed LVs here, as the relationships
         * are not found by walking the new metadata.
         */
        if (!(lv_pre->status & LOCKED) &&
            (lv->status & LOCKED) &&
            (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
                /* Preload all the LVs above the PVMOVE LV */
                dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
                        if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
                                log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
                                goto out;
                        }
                        if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
                                goto_out;
                }
                /* Now preload the PVMOVE LV itself */
                if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
                        log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
                        goto out;
                }
                if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
                        goto_out;
        } else {
                if (!_lv_preload(lv_pre, laopts, &flush_required))
                        /* FIXME Revert preloading */
                        goto_out;

                /*
                 * Search for existing LVs that have become detached and preload them.
                 */
                detached.lv_pre = lv_pre;
                detached.laopts = laopts;
                detached.flush_required = &flush_required;

                if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
                        goto_out;

                /*
                 * Preload any snapshots that are being removed.
                 */
                if (!laopts->origin_only && lv_is_origin(lv)) {
                        dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
                                if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
                                        log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
                                                  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
                                        goto out;
                                }
                                if (!lv_is_cow(lvl_pre->lv) &&
                                    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
                                        goto_out;
                        }
                }
        }

        if (!monitor_dev_for_events(cmd, lv, laopts, 0))
                /* FIXME Consider aborting here */
                stack;

        critical_section_inc(cmd, "suspending");
        if (pvmove_lv)
                critical_section_inc(cmd, "suspending pvmove LV");

        if (!laopts->origin_only &&
            (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
                lockfs = 1;

        if (laopts->origin_only && lv_is_thin_volume(lv) && lv_is_thin_volume(lv_pre))
                lockfs = 1;

        /*
         * Suspending an LV directly above a PVMOVE LV also
         * suspends other LVs using that same PVMOVE LV.
         * FIXME Remove this and delay the 'clear node' until
         * after the code knows whether there's a different
         * inactive table to load or not instead so lv_suspend
         * can be called separately for each LV safely.
         */
        if ((lv_pre->vg->status & PRECOMMITTED) &&
            (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
                if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
                        critical_section_dec(cmd, "failed precommitted suspend");
                        if (pvmove_lv)
                                critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
                        goto_out;
                }
        } else {
                /* Normal suspend */
                if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
                        critical_section_dec(cmd, "failed suspend");
                        if (pvmove_lv)
                                critical_section_dec(cmd, "failed suspend (pvmove)");
                        goto_out;
                }
        }

        r = 1;
out:
        if (lv_pre)
                release_vg(lv_pre->vg);
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}

/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any preloaded tables may then use non-clustered targets.
 *
 * Returns success if the device is not active.
 */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
        struct lv_activate_opts laopts = {
                .origin_only = origin_only,
                .exclusive = exclusive
        };

        return _lv_suspend(cmd, lvid_s, &laopts, 0);
}
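
/*
 * Sketch of the usual metadata-update sequence built on these calls
 * (a typical caller pattern, not taken from this file):
 *
 *   lv_suspend_if_active(cmd, lvid, 0, 0);    -- queue I/O, preload new table
 *   vg_commit(vg);                            -- commit new metadata to disk
 *   lv_resume_if_active(cmd, lvid, 0, 0, 0);  -- resume using the new table
 */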

/* No longer used */
/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
        return _lv_suspend(cmd, lvid_s, 1);
}
***********/

static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
                      struct lv_activate_opts *laopts, int error_if_not_active)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;
        int messages_only = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto_out;

        if (lv_is_thin_pool(lv) && laopts->origin_only)
                messages_only = 1;

        if (!lv_is_origin(lv) && !lv_is_thin_volume(lv))
                laopts->origin_only = 0;

        if (test_mode()) {
                _skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
                      laopts->revert ? " (reverting)" : "");
                r = 1;
                goto out;
        }

        log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
                  error_if_not_active ? "" : " if active",
                  laopts->origin_only ? " without snapshots" : "",
                  laopts->revert ? " (reverting)" : "");

        if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
                goto_out;

        if (!info.exists || !(info.suspended || messages_only)) {
                if (error_if_not_active)
                        goto_out;
                r = 1;
                if (!info.suspended)
                        critical_section_dec(cmd, "already resumed");
                goto out;
        }

        laopts->read_only = _passes_readonly_filter(cmd, lv);

        if (!_lv_activate_lv(lv, laopts))
                goto_out;

        critical_section_dec(cmd, "resumed");

        if (!monitor_dev_for_events(cmd, lv, laopts, 1))
                stack;

        r = 1;
out:
        if (lv)
                release_vg(lv->vg);

        return r;
}

/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any tables loaded may then use non-clustered targets.
 *
 * @origin_only
 * @exclusive   This parameter only has an effect in cluster context.
 *              It forces a local target type to be used (instead of a
 *              cluster-aware type).
 * Returns success if the device is not active.
 */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
                        unsigned origin_only, unsigned exclusive,
                        unsigned revert)
{
        struct lv_activate_opts laopts = {
                .origin_only = origin_only,
                .exclusive = exclusive,
                .revert = revert
        };

        return _lv_resume(cmd, lvid_s, &laopts, 0);
}

int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
        struct lv_activate_opts laopts = { .origin_only = origin_only, };

        return _lv_resume(cmd, lvid_s, &laopts, 1);
}

static int _lv_has_open_snapshots(struct logical_volume *lv)
{
        struct lv_segment *snap_seg;
        struct lvinfo info;
        int r = 0;

        dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
                if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
                        r = 1;
                        continue;
                }

                if (info.exists && info.open_count) {
                        log_error("LV %s/%s has open snapshot %s: "
                                  "not deactivating", lv->vg->name, lv->name,
                                  snap_seg->cow->name);
                        r = 1;
                }
        }

        return r;
}

int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (test_mode()) {
                _skip("Deactivating '%s'.", lv->name);
                r = 1;
                goto out;
        }

        log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

        if (!lv_info(cmd, lv, 0, &info, 1, 0))
                goto_out;

        if (!info.exists) {
                r = 1;
                goto out;
        }

        if (lv_is_visible(lv)) {
                if (!lv_check_not_in_use(cmd, lv, &info))
                        goto_out;

                if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
                        goto_out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        if (!monitor_dev_for_events(cmd, lv, NULL, 0))
                stack;

        critical_section_inc(cmd, "deactivating");
        r = _lv_deactivate(lv);
        critical_section_dec(cmd, "deactivated");

        if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
                r = 0;
out:
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}

/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
                         int *activate_lv)
{
        struct logical_volume *lv;
        int r = 0;

        if (!activation()) {
                *activate_lv = 1;
                return 1;
        }

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (!_passes_activation_filter(cmd, lv)) {
                log_verbose("Not activating %s/%s since it does not pass "
                            "activation filter.", lv->vg->name, lv->name);
                *activate_lv = 0;
        } else
                *activate_lv = 1;
        r = 1;
out:
        if (lv)
                release_vg(lv->vg);

        return r;
}

static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
                        struct lv_activate_opts *laopts, int filter)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (filter && !_passes_activation_filter(cmd, lv)) {
                log_error("Not activating %s/%s since it does not pass "
                          "activation filter.", lv->vg->name, lv->name);
                goto out;
        }

        if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
                log_error("Refusing activation of partial LV %s. Use --partial to override.",
                          lv->name);
                goto_out;
        }

        if (lv_has_unknown_segments(lv)) {
                log_error("Refusing activation of LV %s containing "
                          "an unrecognised segment.", lv->name);
                goto_out;
        }

        if (test_mode()) {
                _skip("Activating '%s'.", lv->name);
                r = 1;
                goto out;
        }

        if (filter)
                laopts->read_only = _passes_readonly_filter(cmd, lv);

        log_debug("Activating %s/%s%s%s.", lv->vg->name, lv->name,
                  laopts->exclusive ? " exclusively" : "",
                  laopts->read_only ? " read-only" : "");

        if (!lv_info(cmd, lv, 0, &info, 0, 0))
                goto_out;

        /*
         * Nothing to do?
         */
        if (info.exists && !info.suspended && info.live_table &&
            (info.read_only == read_only_lv(lv, laopts))) {
                r = 1;
                goto out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        critical_section_inc(cmd, "activating");
        if (!(r = _lv_activate_lv(lv, laopts)))
                stack;
        critical_section_dec(cmd, "activated");

        if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
                stack;

out:
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}

/* Activate LV */
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        struct lv_activate_opts laopts = { .exclusive = exclusive };

        if (!_lv_activate(cmd, lvid_s, &laopts, 0))
                return_0;

        return 1;
}

/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        struct lv_activate_opts laopts = { .exclusive = exclusive };

        if (!_lv_activate(cmd, lvid_s, &laopts, 1))
                return_0;

        return 1;
}

int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
        int r = 1;

        if (!lv) {
                r = dm_mknodes(NULL);
                fs_unlock();
                return r;
        }

        if (!activation())
                return 1;

        r = dev_manager_mknodes(lv);

        fs_unlock();

        return r;
}

/*
 * Does the PV use the VG somewhere in its construction?
 * Returns 1 if it does; errs on the side of caution by also
 * returning 1 on failure.
 */
int pv_uses_vg(struct physical_volume *pv,
               struct volume_group *vg)
{
        if (!activation() || !pv->dev)
                return 0;

        if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
                return 0;

        return dev_manager_device_uses_vg(pv->dev, vg);
}

void activation_release(void)
{
        dev_manager_release();
}

void activation_exit(void)
{
        dev_manager_exit();
}
#endif