/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "metadata.h"
#include "activate.h"
#include "memlock.h"
#include "display.h"
#include "fs.h"
#include "lvm-exec.h"
#include "lvm-file.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "str_list.h"
#include "config.h"
#include "filter.h"
#include "segtype.h"
#include "sharedlib.h"

#include <limits.h>
#include <fcntl.h>
#include <unistd.h>

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)

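/*
 * Check whether the LVM1 kernel driver is present by testing for its
 * /proc/lvm/global file (proc_dir is normally "/proc").
 */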
int lvm1_present(struct cmd_context *cmd)
{
	static char path[PATH_MAX];

	if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
	    < 0) {
		log_error("LVM1 proc global snprintf failed");
		return 0;
	}

	if (path_exists(path))
		return 1;
	else
		return 0;
}

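/*
 * Collect the names of the kernel modules needed to activate this segment,
 * including any needed by its snapshots and by sub-LVs its areas map onto.
 */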
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;
		}
	}

	return 1;
}

int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules)
{
	struct lv_segment *seg;

	dm_list_iterate_items(seg, &lv->segments)
		if (!list_segment_modules(mem, seg, modules))
			return_0;

	return 1;
}

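/*
 * Without libdevmapper support, every activation entry point below is a
 * stub: queries report nothing active and state-changing calls succeed
 * as no-ops, so the tools still work for metadata-only operations.
 */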
#ifndef DEVMAPPER_SUPPORT
void set_activation(int act)
{
	static int warned = 0;

	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
			struct logical_volume *lv, struct lvinfo *info)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
			 percent_t *percent)
{
	return 0;
}
int lv_thin_percent(const struct logical_volume *lv, int mapped,
		    percent_t *percent)
{
	return 0;
}
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
				uint64_t *transaction_id)
{
	return 0;
}
int lvs_in_vg_activated(const struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive, unsigned revert)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

int lv_is_active(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_but_not_locally(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
	return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
/* fs.c */
void fs_unlock(void)
{
}
/* dev_manager.c */
#include "targets.h"
int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
		   struct dm_tree_node *node, uint32_t start_area,
		   uint32_t areas)
{
	return 0;
}
int device_is_usable(struct device *dev)
{
	return 0;
}
int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
		       const char *layer, const char *target_type)
{
	return 0;
}
#else /* DEVMAPPER_SUPPORT */

static int _activation = 1;

void set_activation(int act)
{
	if (act == _activation)
		return;

	_activation = act;
	if (_activation)
		log_verbose("Activation enabled. Device-mapper kernel "
			    "driver will be used.");
	else
		log_warn("WARNING: Activation disabled. No device-mapper "
			 "interaction will be attempted.");
}

int activation(void)
{
	return _activation;
}

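/*
 * Match an LV against one of the activation/*_volume_list settings.
 * Each list entry may be "vgname", "vgname/lvname", "@tag" (matching any
 * LV or VG tag) or "@*" (matching any host tag).  A hypothetical lvm.conf
 * fragment, for illustration only:
 *
 *	activation {
 *		volume_list = [ "vg00", "vg01/lvhome", "@backup", "@*" ]
 *	}
 */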
static int _lv_passes_volumes_filter(struct cmd_context *cmd, struct logical_volume *lv,
				     const struct dm_config_node *cn, const char *config_path)
{
	const struct dm_config_value *cv;
	const char *str;
	static char path[PATH_MAX];

	log_verbose("%s configuration setting defined: "
		    "Checking the list to match %s/%s",
		    config_path, lv->vg->name, lv->name);

	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in config file %s",
				  config_path);
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file %s",
				  config_path);
			continue;
		}

		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "%s", config_path);
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in %s configuration setting "
		    "matches %s/%s", config_path, lv->vg->name, lv->name);

	return 0;
}

static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined: Checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}

	return _lv_passes_volumes_filter(cmd, lv, cn, "activation/volume_list");
}

static int _passes_readonly_filter(struct cmd_context *cmd,
				   struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	if (!(cn = find_config_tree_node(cmd, "activation/read_only_volume_list")))
		return 0;

	return _lv_passes_volumes_filter(cmd, lv, cn, "activation/read_only_volume_list");
}

int lv_passes_auto_activation_filter(struct cmd_context *cmd, struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	if (!(cn = find_config_tree_node(cmd, "activation/auto_activation_volume_list"))) {
		log_verbose("activation/auto_activation_volume_list configuration setting "
			    "not defined: All logical volumes will be auto-activated.");
		return 1;
	}

	return _lv_passes_volumes_filter(cmd, lv, cn, "activation/auto_activation_volume_list");
}

int library_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	return dm_get_library_version(version, size);
}

int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}

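/*
 * Query the running kernel for the version of a device-mapper target via
 * DM_DEVICE_LIST_VERSIONS.  A minimal caller-side sketch (the target name
 * here is only an example):
 *
 *	uint32_t maj, min, patch;
 *
 *	if (target_version("thin-pool", &maj, &min, &patch))
 *		log_debug("thin-pool %u.%u.%u", maj, min, patch);
 *
 * Note that a success return with version 0.0.0 can also mean the kernel
 * does not support LIST_VERSIONS at all - see the fallback below.
 */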
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_versions *target, *last_target;

	log_very_verbose("Getting target version for %s", target_name);
	if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
		return_0;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (!dm_task_run(dmt)) {
		log_debug("Failed to get %s target version", target_name);
		/* Assume this was because LIST_VERSIONS isn't supported */
		*maj = 0;
		*min = 0;
		*patchlevel = 0;
		r = 1;
		goto out;
	}

	target = dm_task_get_versions(dmt);

	do {
		last_target = target;

		if (!strcmp(target_name, target->name)) {
			r = 1;
			*maj = target->version[0];
			*min = target->version[1];
			*patchlevel = target->version[2];
			goto out;
		}

		target = (struct dm_versions *)((char *) target + target->next);
	} while (last_target != target);

out:
	if (r)
		log_very_verbose("Found %s target "
				 "v%" PRIu32 ".%" PRIu32 ".%" PRIu32 ".",
				 target_name, *maj, *min, *patchlevel);

	dm_task_destroy(dmt);

	return r;
}

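/*
 * Check whether the DM device with the given major:minor has a UUID
 * beginning with the given prefix (case-insensitive), e.g. "LVM-".
 */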
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
	struct dm_task *dmt;
	const char *uuid;
	int r;

	if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
		return_0;

	if (!dm_task_set_minor(dmt, minor) ||
	    !dm_task_set_major(dmt, major) ||
	    !dm_task_run(dmt) ||
	    !(uuid = dm_task_get_uuid(dmt))) {
		dm_task_destroy(dmt);
		return 0;
	}

	r = strncasecmp(uuid, prefix, strlen(prefix));
	dm_task_destroy(dmt);

	return r ? 0 : 1;
}

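/*
 * Try to load the kernel module for a target by running
 * "modprobe dm-<target_name>".  Compiled out if MODPROBE_CMD is unset.
 */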
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}

int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}

/*
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;
	const char *layer;

	if (!activation())
		return 0;

	/*
	 * If open_count info is requested, we have to be sure our own udev
	 * transactions are finished.
	 * For a non-clustered locking type we are only interested in
	 * non-delete operations in progress - as only those could lead to
	 * opened files.
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	if (use_layer && lv_is_thin_pool(lv))
		layer = "tpool";
	else if (use_layer && lv_is_origin(lv))
		layer = "real";
	else
		layer = NULL;

	if (!dev_manager_info(lv->vg->cmd->mem, lv, layer, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}

int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	int r;
	struct logical_volume *lv;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		return 0;

	r = lv_info(cmd, lv, use_layer, info, with_open_count, with_read_ahead);
	release_vg(lv->vg);

	return r;
}

int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
			struct logical_volume *lv, struct lvinfo *info)
{
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Logical volume %s/%s in use.",
				  lv->vg->name, lv->name);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Logical volume %s/%s is used by another device.",
			  lv->vg->name, lv->name);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Logical volume %s/%s contains a filesystem in use.",
			  lv->vg->name, lv->name);
		return 0;
	}

	return 1;
}

/*
 * Returns 1 if transient status was checked, else 0 on failure.
 */
int lv_check_transient(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_transient(dm, lv)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/* FIXME Merge with snapshot_percent */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If a mirrored LV is temporarily shrunk to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}

/*
 * Returns the data or metadata percent usage, depending on whether
 * metadata is 0 or 1.
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
			 percent_t *percent)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking thin %sdata percent for LV %s/%s",
		  (metadata) ? "meta" : "", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_percent(const struct logical_volume *lv,
		    int mapped, percent_t *percent)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking thin percent for LV %s/%s",
		  lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
		stack;

	dev_manager_destroy(dm);

	return r;
}

/*
 * Returns 1 if transaction_id set, else 0 on failure.
 */
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
				uint64_t *transaction_id)
{
	int r;
	struct dev_manager *dm;
	struct dm_status_thin_pool *status;

	if (!activation())
		return 0;

	log_debug("Checking thin-pool transaction id for LV %s/%s",
		  lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_thin_pool_status(dm, lv, &status)))
		stack;
	else
		*transaction_id = status->transaction_id;

	dev_manager_destroy(dm);

	return r;
}

static int _lv_active(struct cmd_context *cmd, const struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
		stack;
		return -1;
	}

	return info.exists;
}

static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
		stack;
		return -1;
	}

	return info.open_count;
}

static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_activate(dm, lv, laopts)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
		       int *flush_required)
{
	int r = 0;
	struct dev_manager *dm;
	int old_readonly = laopts->read_only;

	laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		goto_out;

	if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
		stack;

	dev_manager_destroy(dm);

	laopts->read_only = old_readonly;
out:
	return r;
}

static int _lv_deactivate(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_deactivate(dm, lv)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
			  int lockfs, int flush_required)
{
	int r;
	struct dev_manager *dm;

	laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

	/*
	 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
	 * device directly, we don't want to touch the devices that use it.
	 */
	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}

/*
 * These two functions return the number of visible LVs in the state,
 * or -1 on error.  FIXME Check this.
 */
int lvs_in_vg_activated(const struct volume_group *vg)
{
	struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv))
			count += (_lv_active(vg->cmd, lvl->lv) == 1);

	log_debug("Counted %d active LVs in VG %s", count, vg->name);

	return count;
}

int lvs_in_vg_opened(const struct volume_group *vg)
{
	const struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv))
			count += (_lv_open_count(vg->cmd, lvl->lv) > 0);

	log_debug("Counted %d open LVs in VG %s", count, vg->name);

	return count;
}

/*
 * _lv_is_active
 * @lv:        logical volume being queried
 * @locally:   set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code, which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	    0		not active
 *	   1	   0	    0		active remotely
 *	   1	   0	    1		exclusive remotely
 *	   1	   1	    0		active locally and possibly remotely
 *	   1	   1	    1		exclusive locally (or local && !cluster)
 *
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(const struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		if (l)
			e = 1; /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	log_error("Unable to determine exclusivity of %s", lv->name);

	e = 0;

	/*
	 * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
	 * but it's unreliable.
	 */

out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}

int lv_is_active(const struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}

int lv_is_active_but_not_locally(const struct logical_volume *lv)
{
	int l;

	return _lv_is_active(lv, &l, NULL) && !l;
}

int lv_is_active_exclusive(const struct logical_volume *lv)
{
	int e;

	return _lv_is_active(lv, NULL, &e) && e;
}

int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}

int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}

#ifdef DMEVENTD
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							  const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);
	return NULL;
}

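/*
 * Resolve a dmeventd monitoring DSO name (a libdevmapper-event plugin)
 * into a full path, using the configured shared library directory.
 */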
char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
	char *path;

	if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
		log_error("Failed to allocate dmeventd library path.");
		return NULL;
	}

	get_shared_library_path(cmd, libpath, path, PATH_MAX);

	return path;
}

static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
{
	const char *layer;

	if (lv_is_thin_pool(lv))
		layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
	else if (lv_is_origin(lv))
		layer = "real"; /* Monitor "real" for "snapshot-origin". */
	else
		layer = NULL;

	return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
}

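/*
 * Ask dmeventd whether the given DSO is registered for this LV.
 * Returns the registered event mask (0 if unregistered or on error)
 * and sets *pending if the registration is still in progress.
 */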
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	*pending = 0;

	if (!dso)
		return_0;

	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}

int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}

#endif

/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };
	static const struct lv_activate_opts thinopts = { .skip_in_use = 1 };
	struct lvinfo info;

	if (!laopts)
		laopts = &zlaopts;

	/* Skip the dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd is configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * Allow unmonitoring of a thin pool via an explicit pool unmonitor,
	 * or via an unmonitor before the last thin-pool user is deactivated.
	 * Skip the unmonitor if it was invoked via unmonitoring of a thin
	 * volume and the pool has another user (open_count > 1).
	 */
	if (laopts->skip_in_use && lv_info(lv->vg->cmd, lv, 1, &info, 1, 0) &&
	    (info.open_count != 1)) {
		log_debug("Skipping unmonitor of opened %s (open:%d)",
			  lv->name, info.open_count);
		return 1;
	}

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		/*
		 * If unmonitoring of a thin volume was requested, ask the
		 * pool to check first whether it has any other users.
		 *
		 * FIXME: code here looks like _lv_postorder()
		 */
		if (seg->pool_lv &&
		    !monitor_dev_for_events(cmd, seg->pool_lv,
					    (!monitor) ? &thinopts : NULL, monitor))
			r = 0;

		if (seg->metadata_lv &&
		    !monitor_dev_for_events(cmd, seg->metadata_lv, NULL, monitor))
			r = 0;

		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue; /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple of times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}

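/*
 * Context for _preload_detached_lv() below: while walking the sub-LVs of
 * the old LV tree, any LV that stays visible and active but is being
 * detached in the precommitted metadata gets its new table preloaded.
 */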
struct detached_lv_data {
	struct logical_volume *lv_pre;
	struct lv_activate_opts *laopts;
	int *flush_required;
};

static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
{
	struct detached_lv_data *detached = data;
	struct lv_list *lvl_pre;

	if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
		if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) &&
		    (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
		    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
			return_0;
	}

	return 1;
}

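/*
 * Core of lv_suspend*(): look the LV up by lvid in both the committed and
 * the precommitted metadata, preload the new device-mapper tables, then
 * suspend the live device (optionally locking the filesystem first).
 */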
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;
	struct detached_lv_data detached;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_thin_volume(lv) && !(lv_is_origin(lv) && lv_is_origin(lv_pre)))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * Preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if (!(lv_pre->status & LOCKED) &&
	    (lv->status & LOCKED) &&
	    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
		/* Preload all the LVs above the PVMOVE LV */
		dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
				log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		}
		/* Now preload the PVMOVE LV itself */
		if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
			log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
			goto out;
		}
		if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
			goto_out;
	} else {
		if (!_lv_preload(lv_pre, laopts, &flush_required))
			/* FIXME Revert preloading */
			goto_out;

		/*
		 * Search for existing LVs that have become detached and preload them.
		 */
		detached.lv_pre = lv_pre;
		detached.laopts = laopts;
		detached.flush_required = &flush_required;

		if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
			goto_out;

		/*
		 * Preload any snapshots that are being removed.
		 */
		if (!laopts->origin_only && lv_is_origin(lv)) {
			dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
				if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
					log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
						  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
					goto out;
				}
				if (!lv_is_cow(lvl_pre->lv) &&
				    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}
		}
	}

	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	if (laopts->origin_only && lv_is_thin_volume(lv) && lv_is_thin_volume(lv_pre))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		release_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any preloaded tables may then use non-clustered targets.
 *
 * Returns success if the device is not active.
 */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
	struct lv_activate_opts laopts = {
		.origin_only = origin_only,
		.exclusive = exclusive
	};

	return _lv_suspend(cmd, lvid_s, &laopts, 0);
}

/* No longer used */
/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return _lv_suspend(cmd, lvid_s, 1);
}
***********/

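/*
 * Core of lv_resume*(): resume a previously suspended device (or just
 * deliver queued messages for a thin pool), then restore event monitoring.
 */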
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
		      struct lv_activate_opts *laopts, int error_if_not_active)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;
	int messages_only = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	if (lv_is_thin_pool(lv) && laopts->origin_only)
		messages_only = 1;

	if (!lv_is_origin(lv) && !lv_is_thin_volume(lv))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
		      laopts->revert ? " (reverting)" : "");
		r = 1;
		goto out;
	}

	log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
		  error_if_not_active ? "" : " if active",
		  laopts->origin_only ? " without snapshots" : "",
		  laopts->revert ? " (reverting)" : "");

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || !(info.suspended || messages_only)) {
		if (error_if_not_active)
			goto_out;
		r = 1;
		if (!info.suspended)
			critical_section_dec(cmd, "already resumed");
		goto out;
	}

	laopts->read_only = _passes_readonly_filter(cmd, lv);

	if (!_lv_activate_lv(lv, laopts))
		goto_out;

	critical_section_dec(cmd, "resumed");

	if (!monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

	r = 1;
out:
	if (lv)
		release_vg(lv->vg);

	return r;
}

/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any tables loaded may then use non-clustered targets.
 *
 * @origin_only
 * @exclusive   This parameter only has an effect in cluster context.
 *              It forces a local target type to be used (instead of a
 *              cluster-aware type).
 *
 * Returns success if the device is not active.
 */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive,
			unsigned revert)
{
	struct lv_activate_opts laopts = {
		.origin_only = origin_only,
		.exclusive = exclusive,
		.revert = revert
	};

	return _lv_resume(cmd, lvid_s, &laopts, 0);
}

int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	struct lv_activate_opts laopts = { .origin_only = origin_only, };

	return _lv_resume(cmd, lvid_s, &laopts, 1);
}

static int _lv_has_open_snapshots(struct logical_volume *lv)
{
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0;

	dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
		if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
			r = 1;
			continue;
		}

		if (info.exists && info.open_count) {
			log_error("LV %s/%s has open snapshot %s: "
				  "not deactivating", lv->vg->name, lv->name,
				  snap_seg->cow->name);
			r = 1;
		}
	}

	return r;
}

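/*
 * Deactivate an LV: refuse if it is visibly in use (open count, holders,
 * mounted filesystem or open snapshots), stop event monitoring, remove
 * the device-mapper device, and verify that it is gone.
 */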
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	if (!info.exists) {
		r = 1;
		goto out;
	}

	if (lv_is_visible(lv)) {
		if (!lv_check_not_in_use(cmd, lv, &info))
			goto_out;

		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	struct logical_volume *lv;
	int r = 0;

	if (!activation()) {
		*activate_lv = 1;
		return 1;
	}

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (!_passes_activation_filter(cmd, lv)) {
		log_verbose("Not activating %s/%s since it does not pass "
			    "activation filter.", lv->vg->name, lv->name);
		*activate_lv = 0;
	} else
		*activate_lv = 1;
	r = 1;
out:
	if (lv)
		release_vg(lv->vg);

	return r;
}

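/*
 * Core of lv_activate*(): apply the activation and read-only filters,
 * refuse partial or unrecognised LVs, then load and resume the tables
 * and start event monitoring.
 */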
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	if (filter)
		laopts->read_only = _passes_readonly_filter(cmd, lv);

	log_debug("Activating %s/%s%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "",
		  laopts->read_only ? " read-only" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	/*
	 * Nothing to do?
	 */
	if (info.exists && !info.suspended && info.live_table &&
	    (info.read_only == read_only_lv(lv, laopts))) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}

/* Activate LV */
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 0))
		return_0;

	return 1;
}

/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 1))
		return_0;

	return 1;
}

int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	int r = 1;

	if (!lv) {
		r = dm_mknodes(NULL);
		fs_unlock();
		return r;
	}

	if (!activation())
		return 1;

	r = dev_manager_mknodes(lv);

	fs_unlock();

	return r;
}

/*
 * Does PV use VG somewhere in its construction?
 * Returns 1 on failure.
 */
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	if (!activation() || !pv->dev)
		return 0;

	if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
		return 0;

	return dev_manager_device_uses_vg(pv->dev, vg);
}

void activation_release(void)
{
	dev_manager_release();
}

void activation_exit(void)
{
	dev_manager_exit();
}
#endif