sourceware.org Git — lvm2.git — blob: lib/activate/activate.c
Commit context: "Thin: use origin_only for thin pools as well"
1 /*
2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
4 *
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
10 *
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 */
15
16 #include "lib.h"
17 #include "metadata.h"
18 #include "activate.h"
19 #include "memlock.h"
20 #include "display.h"
21 #include "fs.h"
22 #include "lvm-exec.h"
23 #include "lvm-file.h"
24 #include "lvm-string.h"
25 #include "toolcontext.h"
26 #include "dev_manager.h"
27 #include "str_list.h"
28 #include "config.h"
29 #include "filter.h"
30 #include "segtype.h"
31 #include "sharedlib.h"
32
33 #include <limits.h>
34 #include <fcntl.h>
35 #include <unistd.h>
36
37 #define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
38
39 int lvm1_present(struct cmd_context *cmd)
40 {
41 static char path[PATH_MAX];
42
43 if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
44 < 0) {
45 log_error("LVM1 proc global snprintf failed");
46 return 0;
47 }
48
49 if (path_exists(path))
50 return 1;
51 else
52 return 0;
53 }
54
/*
 * Collect the kernel module names needed to activate every part of 'seg'
 * into 'modules': the segment type's own modules, those of any snapshots
 * hanging off an origin LV, the snapshot segment of a COW LV, and
 * (recursively) the modules of any stacked AREA_LV sub-LVs.
 * Returns 1 on success, 0 on allocation failure.
 */
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	/* Modules required by the segment type itself */
	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	/* An origin also needs the modules of each of its snapshot COW LVs */
	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	/* A COW LV needs the modules of its snapshot segment */
	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	/* Recurse into stacked LVs used as segment areas */
	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;	/* No modules needed for PV-backed or empty areas */
		}
	}

	return 1;
}
102
/*
 * Accumulate the kernel modules needed by every segment of 'lv' into
 * 'modules'.  Returns 1 on success, 0 on failure.
 */
int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules)
{
	struct lv_segment *seg;

	dm_list_iterate_items(seg, &lv->segments)
		if (!list_segment_modules(mem, seg, modules))
			return_0;

	return 1;
}
114
#ifndef DEVMAPPER_SUPPORT

/*
 * Stubs used when LVM2 is built without libdevmapper: activation is
 * permanently disabled, so queries report "nothing present" (0).
 */

void set_activation(int act)
{
	static int warned = 0;

	/* Only complain once, and only if someone tries to enable activation */
	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
/*
 * Stub for builds without libdevmapper: always reports no prefix match.
 * Signature fixed to match the DEVMAPPER_SUPPORT implementation below
 * (it had a spurious leading 'sysfs_dir' parameter; both branches must
 * satisfy the same prototype in activate.h).
 */
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
	return 0;
}
/*
 * Device info/status queries: without device-mapper nothing can be
 * active, so report failure/absence (0).
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
/*
 * Suspend/resume/activation requests report success (1) so command
 * flow can continue even though no device work is performed.
 */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive, unsigned revert)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int lv_send_message(const struct logical_volume *lv, const char *message)
{
	return 0;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

/* Activity queries: nothing is ever active without device-mapper. */
int lv_is_active(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
268 #else /* DEVMAPPER_SUPPORT */
269
/* Global on/off switch for device-mapper interaction; enabled by default. */
static int _activation = 1;
271
272 void set_activation(int act)
273 {
274 if (act == _activation)
275 return;
276
277 _activation = act;
278 if (_activation)
279 log_verbose("Activation enabled. Device-mapper kernel "
280 "driver will be used.");
281 else
282 log_warn("WARNING: Activation disabled. No device-mapper "
283 "interaction will be attempted.");
284 }
285
/* Report whether device-mapper activation is currently enabled. */
int activation(void)
{
	return _activation;
}
290
/*
 * Test one LV against a configured volume list 'cn' (e.g.
 * activation/volume_list).  Accepted entry forms:
 *   "@*"            - match if any host tag matches any LV or VG tag
 *   "@tag"          - match if the LV or its VG carries 'tag'
 *   "vgname"        - match any LV in that VG
 *   "vgname/lvname" - match that exact LV
 * Returns 1 on the first match, 0 when nothing matches; malformed
 * entries are logged and skipped.
 */
static int _passes_volumes_filter(struct cmd_context *cmd,
				  struct logical_volume *lv,
				  const struct dm_config_node *cn,
				  const char *config_path)
{
	const struct dm_config_value *cv;
	const char *str;
	static char path[PATH_MAX];

	log_verbose("%s configuration setting defined: "
		    "Checking the list to match %s/%s",
		    config_path, lv->vg->name, lv->name);

	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in config file %s",
				  config_path);
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file %s",
				  config_path);
			continue;
		}


		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "%s", config_path);
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in %s configuration setting "
		    "matches %s/%s", config_path, lv->vg->name, lv->name);

	return 0;
}
365
/*
 * Decide whether this LV may be activated.  With no
 * activation/volume_list configured, fall back to host-tag matching
 * (no host tags defined at all means "allow"); otherwise delegate to
 * the configured list.
 */
static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined: Checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}

	return _passes_volumes_filter(cmd, lv, cn, "activation/volume_list");
}
394
/*
 * Should this LV be activated read-only?  Consults
 * activation/read_only_volume_list; absent setting means "no".
 */
static int _passes_readonly_filter(struct cmd_context *cmd,
				   struct logical_volume *lv)
{
	const struct dm_config_node *cn =
		find_config_tree_node(cmd, "activation/read_only_volume_list");

	return cn ? _passes_volumes_filter(cmd, lv, cn,
					   "activation/read_only_volume_list")
		  : 0;
}
405
/* Fill 'version' with the libdevmapper version string; 0 if activation off. */
int library_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	return dm_get_library_version(version, size);
}

/* Fill 'version' with the kernel device-mapper driver version string. */
int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}
423
424 int target_version(const char *target_name, uint32_t *maj,
425 uint32_t *min, uint32_t *patchlevel)
426 {
427 int r = 0;
428 struct dm_task *dmt;
429 struct dm_versions *target, *last_target;
430
431 log_very_verbose("Getting target version for %s", target_name);
432 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
433 return_0;
434
435 if (activation_checks() && !dm_task_enable_checks(dmt))
436 goto_out;
437
438 if (!dm_task_run(dmt)) {
439 log_debug("Failed to get %s target version", target_name);
440 /* Assume this was because LIST_VERSIONS isn't supported */
441 return 1;
442 }
443
444 target = dm_task_get_versions(dmt);
445
446 do {
447 last_target = target;
448
449 if (!strcmp(target_name, target->name)) {
450 r = 1;
451 *maj = target->version[0];
452 *min = target->version[1];
453 *patchlevel = target->version[2];
454 goto out;
455 }
456
457 target = (struct dm_versions *)((char *) target + target->next);
458 } while (last_target != target);
459
460 out:
461 dm_task_destroy(dmt);
462
463 return r;
464 }
465
/*
 * Check whether dm device major:minor has a DM UUID beginning with
 * 'prefix' (case-insensitive).  Returns 1 on match, 0 otherwise or on
 * any ioctl failure.
 */
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
	struct dm_task *dmt;
	const char *uuid;
	int r;

	if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
		return_0;

	if (!dm_task_set_minor(dmt, minor) ||
	    !dm_task_set_major(dmt, major) ||
	    !dm_task_run(dmt) ||
	    !(uuid = dm_task_get_uuid(dmt))) {
		dm_task_destroy(dmt);
		return 0;
	}

	/* r == 0 means the UUID starts with the prefix */
	r = strncasecmp(uuid, prefix, strlen(prefix));
	dm_task_destroy(dmt);

	return r ? 0 : 1;
}
488
/*
 * Attempt to load kernel module "dm-<target_name>" via modprobe.
 * Returns the exec result, or 0 when built without MODPROBE_CMD.
 */
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
510
/*
 * Check that the kernel provides the named target, optionally trying a
 * modprobe first.  Returns 1 when the target version can be obtained.
 */
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		/* Already available without loading a module? */
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}
531
/*
 * Returns 1 if info structure populated, else 0 on failure.
 * With origin_only set and lv a snapshot origin, the "-real" layer
 * device is queried instead of the top-level LV.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;

	if (!activation())
		return 0;
	/*
	 * If open_count info is requested and we have to be sure our own udev
	 * transactions are finished
	 * For non-clustered locking type we are only interested for non-delete operation
	 * in progress - as only those could lead to opened files
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	if (!dev_manager_info(lv->vg->cmd->mem, lv, (lv_is_origin(lv) && origin_only) ? "real" : NULL, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	/* Copy the fields callers consume out of the dm ioctl result */
	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}
570
571 int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
572 unsigned origin_only,
573 struct lvinfo *info, int with_open_count, int with_read_ahead)
574 {
575 int r;
576 struct logical_volume *lv;
577
578 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
579 return 0;
580
581 if (!lv_is_origin(lv))
582 origin_only = 0;
583
584 r = lv_info(cmd, lv, origin_only, info, with_open_count, with_read_ahead);
585 release_vg(lv->vg);
586
587 return r;
588 }
589
/*
 * Verify the LV's device is not in use: via open_count when sysfs is
 * unavailable, otherwise via sysfs holders and mounted filesystems.
 * Returns 1 when the device is free, 0 when it is in use.
 */
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
			struct logical_volume *lv, struct lvinfo *info)
{
	/* A device that doesn't exist cannot be in use */
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Logical volume %s/%s in use.",
				  lv->vg->name, lv->name);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Logical volume %s/%s is used by another device.",
			  lv->vg->name, lv->name);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Logical volume %s/%s contains a filesystem in use.",
			  lv->vg->name, lv->name);
		return 0;
	}

	return 1;
}
621
/*
 * Refresh the transient status of the LV's kernel targets.
 * Returns 1 on success, else 0 on failure.
 */
625 int lv_check_transient(struct logical_volume *lv)
626 {
627 int r;
628 struct dev_manager *dm;
629
630 if (!activation())
631 return 0;
632
633 log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);
634
635 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
636 return_0;
637
638 if (!(r = dev_manager_transient(dm, lv)))
639 stack;
640
641 dev_manager_destroy(dm);
642
643 return r;
644 }
645
646 /*
647 * Returns 1 if percent set, else 0 on failure.
648 */
649 int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
650 {
651 int r;
652 struct dev_manager *dm;
653
654 if (!activation())
655 return 0;
656
657 log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);
658
659 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
660 return_0;
661
662 if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
663 stack;
664
665 dev_manager_destroy(dm);
666
667 return r;
668 }
669
/* FIXME Merge with snapshot_percent */
/*
 * Report mirror sync progress in *percent (and optionally the kernel
 * event counter via event_nr).  Returns 1 if percent set, 0 on failure
 * or when the device does not exist.
 */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If mirrored LV is temporarily shrinked to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	/* Inactive device: nothing to report */
	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
706
/* RAID sync progress is obtained via the same path as mirror progress. */
int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}
711
712 /*
713 * Returns data or metadata percent usage, depends on metadata 0/1.
714 * Returns 1 if percent set, else 0 on failure.
715 */
716 int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
717 percent_t *percent)
718 {
719 int r;
720 struct dev_manager *dm;
721
722 if (!activation())
723 return 0;
724
725 log_debug("Checking thin %sdata percent for LV %s/%s",
726 (metadata) ? "meta" : "", lv->vg->name, lv->name);
727
728 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
729 return_0;
730
731 if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
732 stack;
733
734 dev_manager_destroy(dm);
735
736 return r;
737 }
738
739 /*
740 * Returns 1 if percent set, else 0 on failure.
741 */
742 int lv_thin_percent(const struct logical_volume *lv,
743 int mapped, percent_t *percent)
744 {
745 int r;
746 struct dev_manager *dm;
747
748 if (!activation())
749 return 0;
750
751 log_debug("Checking thin percent for LV %s/%s",
752 lv->vg->name, lv->name);
753
754 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
755 return_0;
756
757 if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
758 stack;
759
760 dev_manager_destroy(dm);
761
762 return r;
763 }
764
765 /*
766 * Returns 1 if transaction_id set, else 0 on failure.
767 */
768 int lv_thin_pool_transaction_id(const struct logical_volume *lv,
769 uint64_t *transaction_id)
770 {
771 int r;
772 struct dev_manager *dm;
773 struct dm_status_thin_pool *status;
774
775 if (!activation())
776 return 0;
777
778 log_debug("Checking thin percent for LV %s/%s",
779 lv->vg->name, lv->name);
780
781 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
782 return_0;
783
784 if (!(r = dev_manager_thin_pool_status(dm, lv, &status)))
785 stack;
786 else
787 *transaction_id = status->transaction_id;
788
789 dev_manager_destroy(dm);
790
791 return r;
792 }
793
/*
 * Does the LV's top-level device exist in the kernel?
 * Returns 1/0, or -1 when the query itself failed.
 */
static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
		stack;
		return -1;
	}

	return info.exists;
}
805
/*
 * Kernel open count of the LV's device, or -1 when the query failed.
 */
static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
		stack;
		return -1;
	}

	return info.open_count;
}
817
/*
 * Load and activate the LV's device-mapper tree via a dev_manager.
 * For a PVMOVE LV the manager is created with track_pvmove disabled
 * (last argument 0) so the LV is manipulated directly.
 */
static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_activate(dm, lv, laopts)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
832
833 static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
834 int *flush_required)
835 {
836 int r = 0;
837 struct dev_manager *dm;
838 int old_readonly = laopts->read_only;
839
840 laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);
841
842 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
843 goto_out;
844
845 if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
846 stack;
847
848 dev_manager_destroy(dm);
849
850 laopts->read_only = old_readonly;
851 out:
852 return r;
853 }
854
855 static int _lv_deactivate(struct logical_volume *lv)
856 {
857 int r;
858 struct dev_manager *dm;
859
860 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
861 return_0;
862
863 if (!(r = dev_manager_deactivate(dm, lv)))
864 stack;
865
866 dev_manager_destroy(dm);
867 return r;
868 }
869
/*
 * Suspend the LV's device (optionally locking the filesystem and
 * flushing), applying the read-only volume filter first.
 * Returns 1 on success, 0 on failure.
 */
static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
			  int lockfs, int flush_required)
{
	int r;
	struct dev_manager *dm;

	laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

	/*
	 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
	 * device directly, we don't want to touch the devices that use it.
	 */
	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
891
892 /*
893 * These two functions return the number of visible LVs in the state,
894 * or -1 on error. FIXME Check this.
895 */
896 int lvs_in_vg_activated(struct volume_group *vg)
897 {
898 struct lv_list *lvl;
899 int count = 0;
900
901 if (!activation())
902 return 0;
903
904 dm_list_iterate_items(lvl, &vg->lvs)
905 if (lv_is_visible(lvl->lv))
906 count += (_lv_active(vg->cmd, lvl->lv) == 1);
907
908 log_debug("Counted %d active LVs in VG %s", count, vg->name);
909
910 return count;
911 }
912
913 int lvs_in_vg_opened(const struct volume_group *vg)
914 {
915 const struct lv_list *lvl;
916 int count = 0;
917
918 if (!activation())
919 return 0;
920
921 dm_list_iterate_items(lvl, &vg->lvs)
922 if (lv_is_visible(lvl->lv))
923 count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
924
925 log_debug("Counted %d open LVs in VG %s", count, vg->name);
926
927 return count;
928 }
929
930 /*
931 * _lv_is_active
932 * @lv: logical volume being queried
933 * @locally: set if active locally (when provided)
934 * @exclusive: set if active exclusively (when provided)
935 *
936 * Determine whether an LV is active locally or in a cluster.
937 * In addition to the return code which indicates whether or
938 * not the LV is active somewhere, two other values are set
939 * to yield more information about the status of the activation:
940 * return locally exclusively status
941 * ====== ======= =========== ======
942 * 0 0 0 not active
943 * 1 0 0 active remotely
944 * 1 0 1 exclusive remotely
945 * 1 1 0 active locally and possibly remotely
946 * 1 1 1 exclusive locally (or local && !cluster)
947 * The VG lock must be held to call this function.
948 *
949 * Returns: 0 or 1
950 */
static int _lv_is_active(struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	/* Local kernel presence check first */
	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		if (l)
			e = 1; /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	/* Ask the cluster lock manager; also fills 'e' when supported */
	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	log_error("Unable to determine exclusivity of %s", lv->name);

	e = 0;

	/*
	 * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
	 * but it's unreliable.
	 */

out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}
1006
/* Active anywhere (locally or remotely)? */
int lv_is_active(struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}

/* Active on some remote node but not on this one? */
int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	int l;
	return _lv_is_active(lv, &l, NULL) && !l;
}

/* Active exclusively, wherever that may be? */
int lv_is_active_exclusive(struct logical_volume *lv)
{
	int e;

	return _lv_is_active(lv, NULL, &e) && e;
}

/* Active exclusively on this node? */
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}

/* Active exclusively on some other node? */
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}
1038
1039 #ifdef DMEVENTD
/*
 * Build a dm_event_handler addressing device 'dmuuid', monitored by
 * DSO 'dso', with the given timeout and event mask.  Returns NULL on
 * any setup failure.  Note the dm_event_handler_set_* calls return
 * non-zero on failure, hence the unnegated checks.
 */
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							 const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);
	return NULL;
}
1066
1067 char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
1068 {
1069 char *path;
1070
1071 if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
1072 log_error("Failed to allocate dmeventd library path.");
1073 return NULL;
1074 }
1075
1076 get_shared_library_path(cmd, libpath, path, PATH_MAX);
1077
1078 return path;
1079 }
1080
1081 static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
1082 {
1083 const char *layer;
1084
1085 if (lv_is_thin_pool(lv))
1086 layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
1087 else if (lv_is_origin(lv))
1088 layer = "real"; /* Monitor "real" for "snapshot-origin". */
1089 else
1090 layer = NULL;
1091
1092 return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
1093 }
1094
/*
 * Query dmeventd for an existing registration of this LV's monitored
 * device with DSO 'dso'.  Sets *pending when the registration is still
 * in flight.  Returns the registered event mask, or 0 when
 * unregistered or on error.
 */
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;
	*pending = 0;

	if (!dso)
		return_0;

	/* Query the layer device that monitoring is attached to */
	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	/* Strip the "pending" flag out of the mask before reporting it */
	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}
1127
/*
 * Register (set != 0) or unregister (set == 0) this LV's monitored
 * device with dmeventd via DSO 'dso'.  A non-zero timeout also enables
 * DM_EVENT_TIMEOUT events.  Returns 1 on success, 0 on failure.
 */
int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}
1157
1158 #endif
1159
1160 /*
1161 * Returns 0 if an attempt to (un)monitor the device failed.
1162 * Returns 1 otherwise.
1163 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };

	/* NULL laopts means default options */
	if (!laopts)
		laopts = &zlaopts;

	/* skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		/* Skip unmonitorable segments and PVMOVE temporaries */
		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue;  /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		/* Pick the register or unregister hook as appropriate */
		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		/* Only report success if the final state matches the request */
		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}
1304
/*
 * Context handed to _preload_detached_lv() via for_each_sub_lv() while
 * suspending: lets the callback look up each visible sub-LV in the
 * precommitted metadata and preload its new table.
 */
struct detached_lv_data {
	struct logical_volume *lv_pre;	/* LV as found in precommitted metadata */
	struct lv_activate_opts *laopts;	/* activation options of the suspend in progress */
	int *flush_required;	/* set by _lv_preload() when a table flush is needed */
};
1310
1311 static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
1312 {
1313 struct detached_lv_data *detached = data;
1314 struct lv_list *lvl_pre;
1315
1316 if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
1317 if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) && (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
1318 !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
1319 return_0;
1320 }
1321
1322 return 1;
1323 }
1324
/*
 * Suspend an LV: preload the precommitted (inactive) tables for the LV and
 * every related LV that needs one, stop monitoring, then suspend the live
 * device(s).  On success the process holds one (or, for pvmove, two)
 * critical-section references that the matching resume releases.
 *
 * lvid_s                  LVID string identifying the LV in both current and
 *                         precommitted metadata.
 * error_if_not_suspended  when 0, an LV that does not exist or is already
 *                         suspended is treated as success.
 *
 * Returns 1 on success (or nothing to do), 0 on failure.
 */
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;
	struct detached_lv_data detached;

	if (!activation())
		return 1;

	/* Current (committed) view of the LV. */
	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	/* Thin volumes keep origin_only as passed in. */
	if (!lv_is_thin_volume(lv) && !(lv_is_origin(lv) && lv_is_origin(lv_pre)))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			/* Already suspended: still take the reference the
			 * paired resume will drop. */
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * Preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if (!(lv_pre->status & LOCKED) &&
	    (lv->status & LOCKED) &&
	    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
		/* Preload all the LVs above the PVMOVE LV */
		dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
				log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		}
		/* Now preload the PVMOVE LV itself */
		if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
			log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
			goto out;
		}
		if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
			goto_out;
	} else {
		if (!_lv_preload(lv_pre, laopts, &flush_required))
			/* FIXME Revert preloading */
			goto_out;

		/*
		 * Search for existing LVs that have become detached and preload them.
		 */
		detached.lv_pre = lv_pre;
		detached.laopts = laopts;
		detached.flush_required = &flush_required;

		if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
			goto_out;

		/*
		 * Preload any snapshots that are being removed.
		 */
		if (!laopts->origin_only && lv_is_origin(lv)) {
			dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
				if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
					log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
						  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
					goto out;
				}
				/* Still a COW in the new metadata => not being removed. */
				if (!lv_is_cow(lvl_pre->lv) &&
				    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}
		}
	}

	/* Stop dmeventd monitoring before the device freezes. */
	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	/* Snapshot origins and COWs get a filesystem sync before suspend. */
	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		release_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1480
1481 /*
1482 * In a cluster, set exclusive to indicate that only one node is using the
1483 * device. Any preloaded tables may then use non-clustered targets.
1484 *
1485 * Returns success if the device is not active
1486 */
1487 int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
1488 {
1489 struct lv_activate_opts laopts = {
1490 .origin_only = origin_only,
1491 .exclusive = exclusive
1492 };
1493
1494 return _lv_suspend(cmd, lvid_s, &laopts, 0);
1495 }
1496
1497 /* No longer used */
1498 /***********
1499 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
1500 {
1501 return _lv_suspend(cmd, lvid_s, 1);
1502 }
1503 ***********/
1504
1505 static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
1506 struct lv_activate_opts *laopts, int error_if_not_active)
1507 {
1508 struct logical_volume *lv;
1509 struct lvinfo info;
1510 int r = 0;
1511 int messages_only = 0;
1512
1513 if (!activation())
1514 return 1;
1515
1516 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1517 goto_out;
1518
1519 if (lv_is_thin_pool(lv) && laopts->origin_only)
1520 messages_only = 1;
1521
1522 if (!lv_is_origin(lv))
1523 laopts->origin_only = 0;
1524
1525 if (test_mode()) {
1526 _skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
1527 laopts->revert ? " (reverting)" : "");
1528 r = 1;
1529 goto out;
1530 }
1531
1532 log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
1533 error_if_not_active ? "" : " if active",
1534 laopts->origin_only ? " without snapshots" : "",
1535 laopts->revert ? " (reverting)" : "");
1536
1537 if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
1538 goto_out;
1539
1540 if (!info.exists || !(info.suspended || messages_only)) {
1541 if (error_if_not_active)
1542 goto_out;
1543 r = 1;
1544 if (!info.suspended)
1545 critical_section_dec(cmd, "already resumed");
1546 goto out;
1547 }
1548
1549 laopts->read_only = _passes_readonly_filter(cmd, lv);
1550
1551 if (!_lv_activate_lv(lv, laopts))
1552 goto_out;
1553
1554 critical_section_dec(cmd, "resumed");
1555
1556 if (!monitor_dev_for_events(cmd, lv, laopts, 1))
1557 stack;
1558
1559 r = 1;
1560 out:
1561 if (lv)
1562 release_vg(lv->vg);
1563
1564 return r;
1565 }
1566
1567 /*
1568 * In a cluster, set exclusive to indicate that only one node is using the
1569 * device. Any tables loaded may then use non-clustered targets.
1570 *
1571 * @origin_only
1572 * @exclusive This parameter only has an affect in cluster-context.
1573 * It forces local target type to be used (instead of
1574 * cluster-aware type).
1575 * Returns success if the device is not active
1576 */
1577 int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
1578 unsigned origin_only, unsigned exclusive,
1579 unsigned revert)
1580 {
1581 struct lv_activate_opts laopts = {
1582 .origin_only = origin_only,
1583 .exclusive = exclusive,
1584 .revert = revert
1585 };
1586
1587 return _lv_resume(cmd, lvid_s, &laopts, 0);
1588 }
1589
1590 int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
1591 {
1592 struct lv_activate_opts laopts = { .origin_only = origin_only, };
1593
1594 return _lv_resume(cmd, lvid_s, &laopts, 1);
1595 }
1596
1597 static int _lv_has_open_snapshots(struct logical_volume *lv)
1598 {
1599 struct lv_segment *snap_seg;
1600 struct lvinfo info;
1601 int r = 0;
1602
1603 dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
1604 if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
1605 r = 1;
1606 continue;
1607 }
1608
1609 if (info.exists && info.open_count) {
1610 log_error("LV %s/%s has open snapshot %s: "
1611 "not deactivating", lv->vg->name, lv->name,
1612 snap_seg->cow->name);
1613 r = 1;
1614 }
1615 }
1616
1617 return r;
1618 }
1619
/*
 * Deactivate an LV: refuse if it (or any of its snapshots, for an origin)
 * is still open, stop monitoring, remove the device, then verify it is gone.
 * Returns 1 on success (including "not active"), 0 on failure.
 */
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	/* with_open_count=1: we need open_count for the in-use checks below. */
	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	/* Not active - nothing to do. */
	if (!info.exists) {
		r = 1;
		goto out;
	}

	/* Only visible (top-level) LVs get the in-use safety checks. */
	if (lv_is_visible(lv)) {
		if (!lv_check_not_in_use(cmd, lv, &info))
			goto_out;

		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/* Unmonitor before tearing the device down. */
	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	/* Verify the device really disappeared. */
	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1678
1679 /* Test if LV passes filter */
1680 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
1681 int *activate_lv)
1682 {
1683 struct logical_volume *lv;
1684 int r = 0;
1685
1686 if (!activation()) {
1687 *activate_lv = 1;
1688 return 1;
1689 }
1690
1691 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1692 goto out;
1693
1694 if (!_passes_activation_filter(cmd, lv)) {
1695 log_verbose("Not activating %s/%s since it does not pass "
1696 "activation filter.", lv->vg->name, lv->name);
1697 *activate_lv = 0;
1698 } else
1699 *activate_lv = 1;
1700 r = 1;
1701 out:
1702 if (lv)
1703 release_vg(lv->vg);
1704
1705 return r;
1706 }
1707
/*
 * Activate an LV: run the (optional) activation filter and sanity checks,
 * load its table(s) and start monitoring.
 *
 * filter  when non-zero, apply activation and read-only filters; a filter
 *         rejection is reported as an error (unlike lv_activation_filter()).
 *
 * Returns 1 on success (including "already active"), 0 on failure.
 */
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	/* Partial LVs need an explicit user override. */
	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	/* Unknown segment types cannot be turned into device-mapper tables. */
	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	if (filter)
		laopts->read_only = _passes_readonly_filter(cmd, lv);

	log_debug("Activating %s/%s%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "",
		  laopts->read_only ? " read-only" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	/*
	 * Nothing to do?
	 */
	/* Already live, not suspended, and read-only mode matches. */
	if (info.exists && !info.suspended && info.live_table &&
	    (info.read_only == read_only_lv(lv, laopts))) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	/* Monitoring starts only after a successful activation. */
	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1785
1786 /* Activate LV */
1787 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1788 {
1789 struct lv_activate_opts laopts = { .exclusive = exclusive };
1790
1791 if (!_lv_activate(cmd, lvid_s, &laopts, 0))
1792 return_0;
1793
1794 return 1;
1795 }
1796
1797 /* Activate LV only if it passes filter */
1798 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1799 {
1800 struct lv_activate_opts laopts = { .exclusive = exclusive };
1801
1802 if (!_lv_activate(cmd, lvid_s, &laopts, 1))
1803 return_0;
1804
1805 return 1;
1806 }
1807
1808 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
1809 {
1810 int r = 1;
1811
1812 if (!lv) {
1813 r = dm_mknodes(NULL);
1814 fs_unlock();
1815 return r;
1816 }
1817
1818 if (!activation())
1819 return 1;
1820
1821 r = dev_manager_mknodes(lv);
1822
1823 fs_unlock();
1824
1825 return r;
1826 }
1827
1828 /*
1829 * Does PV use VG somewhere in its construction?
1830 * Returns 1 on failure.
1831 */
1832 int pv_uses_vg(struct physical_volume *pv,
1833 struct volume_group *vg)
1834 {
1835 if (!activation())
1836 return 0;
1837
1838 if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
1839 return 0;
1840
1841 return dev_manager_device_uses_vg(pv->dev, vg);
1842 }
1843
/* Release resources held by the dev-manager layer (e.g. across a fork). */
void activation_release(void)
{
	dev_manager_release();
}
1848
/* Final dev-manager cleanup at command exit. */
void activation_exit(void)
{
	dev_manager_exit();
}
1853 #endif
This page took 0.114583 seconds and 6 git commands to generate.