lib/activate/activate.c
Rename origin_only to more generic use_layer flag
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "metadata.h"
#include "activate.h"
#include "memlock.h"
#include "display.h"
#include "fs.h"
#include "lvm-exec.h"
#include "lvm-file.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "str_list.h"
#include "config.h"
#include "filter.h"
#include "segtype.h"
#include "sharedlib.h"

#include <limits.h>
#include <fcntl.h>
#include <unistd.h>

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)

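/* Report whether the LVM1 kernel driver is present (checks <proc_dir>/lvm/global). */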
int lvm1_present(struct cmd_context *cmd)
{
        static char path[PATH_MAX];

        if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
            < 0) {
                log_error("LVM1 proc global snprintf failed");
                return 0;
        }

        if (path_exists(path))
                return 1;
        else
                return 0;
}

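/*
 * Collect the names of the kernel modules needed to activate this segment,
 * including any snapshot COW devices of an origin and stacked AREA_LV areas.
 */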
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
                         struct dm_list *modules)
{
        unsigned int s;
        struct lv_segment *seg2, *snap_seg;
        struct dm_list *snh;

        if (seg->segtype->ops->modules_needed &&
            !seg->segtype->ops->modules_needed(mem, seg, modules)) {
                log_error("module string allocation failed");
                return 0;
        }

        if (lv_is_origin(seg->lv))
                dm_list_iterate(snh, &seg->lv->snapshot_segs)
                        if (!list_lv_modules(mem,
                                             dm_list_struct_base(snh,
                                                                 struct lv_segment,
                                                                 origin_list)->cow,
                                             modules))
                                return_0;

        if (lv_is_cow(seg->lv)) {
                snap_seg = find_cow(seg->lv);
                if (snap_seg->segtype->ops->modules_needed &&
                    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
                                                            modules)) {
                        log_error("snap_seg module string allocation failed");
                        return 0;
                }
        }

        for (s = 0; s < seg->area_count; s++) {
                switch (seg_type(seg, s)) {
                case AREA_LV:
                        seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
                        if (seg2 && !list_segment_modules(mem, seg2, modules))
                                return_0;
                        break;
                case AREA_PV:
                case AREA_UNASSIGNED:
                        ;
                }
        }

        return 1;
}

int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
                    struct dm_list *modules)
{
        struct lv_segment *seg;

        dm_list_iterate_items(seg, &lv->segments)
                if (!list_segment_modules(mem, seg, modules))
                        return_0;

        return 1;
}

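/*
 * Build without device-mapper support: activation is permanently disabled,
 * so these stubs report nothing active and make state changes no-ops.
 */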
#ifndef DEVMAPPER_SUPPORT
void set_activation(int act)
{
        static int warned = 0;

        if (warned || !act)
                return;

        log_error("Compiled without libdevmapper support. "
                  "Can't enable activation.");

        warned = 1;
}
int activation(void)
{
        return 0;
}
int library_version(char *version, size_t size)
{
        return 0;
}
int driver_version(char *version, size_t size)
{
        return 0;
}
int target_version(const char *target_name, uint32_t *maj,
                   uint32_t *min, uint32_t *patchlevel)
{
        return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
                   int use_modprobe)
{
        return 0;
}
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
        return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
            struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
                    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
        return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
                      int wait, percent_t *percent, uint32_t *event_nr)
{
        return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
        return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
        return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
        return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s,
                         unsigned origin_only, unsigned exclusive)
{
        return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
        return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
                        unsigned origin_only, unsigned exclusive, unsigned revert)
{
        return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
        return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
                         int *activate_lv)
{
        return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
        return 1;
}
int lv_send_message(const struct logical_volume *lv, const char *message)
{
        return 0;
}
int pv_uses_vg(struct physical_volume *pv,
               struct volume_group *vg)
{
        return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

int lv_is_active(struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_but_not_locally(struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_exclusive(struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
        return 0;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
        return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
        return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
                           const struct lv_activate_opts *laopts, int monitor)
{
        return 1;
}
#else /* DEVMAPPER_SUPPORT */

static int _activation = 1;

void set_activation(int act)
{
        if (act == _activation)
                return;

        _activation = act;
        if (_activation)
                log_verbose("Activation enabled. Device-mapper kernel "
                            "driver will be used.");
        else
                log_warn("WARNING: Activation disabled. No device-mapper "
                         "interaction will be attempted.");
}

int activation(void)
{
        return _activation;
}

static int _passes_volumes_filter(struct cmd_context *cmd,
                                  struct logical_volume *lv,
                                  const struct dm_config_node *cn,
                                  const char *config_path)
{
        const struct dm_config_value *cv;
        const char *str;
        static char path[PATH_MAX];

        log_verbose("%s configuration setting defined: "
                    "Checking the list to match %s/%s",
                    config_path, lv->vg->name, lv->name);

        for (cv = cn->v; cv; cv = cv->next) {
                if (cv->type != DM_CFG_STRING) {
                        log_error("Ignoring invalid string in config file %s",
                                  config_path);
                        continue;
                }
                str = cv->v.str;
                if (!*str) {
                        log_error("Ignoring empty string in config file %s",
                                  config_path);
                        continue;
                }

                /* Tag? */
                if (*str == '@') {
                        str++;
                        if (!*str) {
                                log_error("Ignoring empty tag in config file "
                                          "%s", config_path);
                                continue;
                        }
                        /* If any host tag matches any LV or VG tag, activate */
                        if (!strcmp(str, "*")) {
                                if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
                                    || str_list_match_list(&cmd->tags,
                                                           &lv->vg->tags, NULL))
                                        return 1;
                                else
                                        continue;
                        }
                        /* If supplied tag matches LV or VG tag, activate */
                        if (str_list_match_item(&lv->tags, str) ||
                            str_list_match_item(&lv->vg->tags, str))
                                return 1;
                        else
                                continue;
                }
                if (!strchr(str, '/')) {
                        /* vgname supplied */
                        if (!strcmp(str, lv->vg->name))
                                return 1;
                        else
                                continue;
                }
                /* vgname/lvname */
                if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
                                lv->name) < 0) {
                        log_error("dm_snprintf error from %s/%s", lv->vg->name,
                                  lv->name);
                        continue;
                }
                if (!strcmp(path, str))
                        return 1;
        }

        log_verbose("No item supplied in %s configuration setting "
                    "matches %s/%s", config_path, lv->vg->name, lv->name);

        return 0;
}

static int _passes_activation_filter(struct cmd_context *cmd,
                                     struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
                log_verbose("activation/volume_list configuration setting "
                            "not defined: Checking only host tags for %s/%s",
                            lv->vg->name, lv->name);

                /* If no host tags defined, activate */
                if (dm_list_empty(&cmd->tags))
                        return 1;

                /* If any host tag matches any LV or VG tag, activate */
                if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
                    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
                        return 1;

                log_verbose("No host tag matches %s/%s",
                            lv->vg->name, lv->name);

                /* Don't activate */
                return 0;
        }

        return _passes_volumes_filter(cmd, lv, cn, "activation/volume_list");
}

static int _passes_readonly_filter(struct cmd_context *cmd,
                                   struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/read_only_volume_list")))
                return 0;

        return _passes_volumes_filter(cmd, lv, cn, "activation/read_only_volume_list");
}

int library_version(char *version, size_t size)
{
        if (!activation())
                return 0;

        return dm_get_library_version(version, size);
}

int driver_version(char *version, size_t size)
{
        if (!activation())
                return 0;

        log_very_verbose("Getting driver version");

        return dm_driver_version(version, size);
}

int target_version(const char *target_name, uint32_t *maj,
                   uint32_t *min, uint32_t *patchlevel)
{
        int r = 0;
        struct dm_task *dmt;
        struct dm_versions *target, *last_target;

        log_very_verbose("Getting target version for %s", target_name);
        if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
                return_0;

        if (activation_checks() && !dm_task_enable_checks(dmt))
                goto_out;

        if (!dm_task_run(dmt)) {
                log_debug("Failed to get %s target version", target_name);
                /* Assume this was because LIST_VERSIONS isn't supported */
                /* Destroy the task before returning to avoid leaking it */
                r = 1;
                goto out;
        }

        target = dm_task_get_versions(dmt);

        do {
                last_target = target;

                if (!strcmp(target_name, target->name)) {
                        r = 1;
                        *maj = target->version[0];
                        *min = target->version[1];
                        *patchlevel = target->version[2];
                        goto out;
                }

                target = (struct dm_versions *)((char *) target + target->next);
        } while (last_target != target);

out:
        dm_task_destroy(dmt);

        return r;
}

int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
        struct dm_task *dmt;
        const char *uuid;
        int r;

        if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
                return_0;

        if (!dm_task_set_minor(dmt, minor) ||
            !dm_task_set_major(dmt, major) ||
            !dm_task_run(dmt) ||
            !(uuid = dm_task_get_uuid(dmt))) {
                dm_task_destroy(dmt);
                return 0;
        }

        r = strncasecmp(uuid, prefix, strlen(prefix));
        dm_task_destroy(dmt);

        return r ? 0 : 1;
}

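/* Try to load the dm-<target_name> module via modprobe (when built with MODPROBE_CMD). */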
int module_present(struct cmd_context *cmd, const char *target_name)
{
        int ret = 0;
#ifdef MODPROBE_CMD
        char module[128];
        const char *argv[3];

        if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
                log_error("module_present module name too long: %s",
                          target_name);
                return 0;
        }

        argv[0] = MODPROBE_CMD;
        argv[1] = module;
        argv[2] = NULL;

        ret = exec_cmd(cmd, argv, NULL, 0);
#endif
        return ret;
}

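/*
 * Check whether the kernel provides the named target, optionally loading
 * its module first when use_modprobe is set.
 */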
int target_present(struct cmd_context *cmd, const char *target_name,
                   int use_modprobe)
{
        uint32_t maj, min, patchlevel;

        if (!activation())
                return 0;

#ifdef MODPROBE_CMD
        if (use_modprobe) {
                if (target_version(target_name, &maj, &min, &patchlevel))
                        return 1;

                if (!module_present(cmd, target_name))
                        return_0;
        }
#endif

        return target_version(target_name, &maj, &min, &patchlevel);
}

/*
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
            struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        struct dm_info dminfo;
        const char *layer;

        if (!activation())
                return 0;
        /*
         * If open_count info is requested, we have to be sure our own udev
         * transactions are finished.
         * For non-clustered locking we are only interested in non-delete
         * operations in progress - as only those could lead to opened files.
         */
        if (with_open_count) {
                if (locking_is_clustered())
                        sync_local_dev_names(cmd); /* Wait to have udev in sync */
                else if (fs_has_non_delete_ops())
                        fs_unlock(); /* For non clustered - wait if there are non-delete ops */
        }

        if (use_layer && lv_is_thin_pool(lv))
                layer = "tpool";
        else if (use_layer && lv_is_origin(lv))
                layer = "real";
        else
                layer = NULL;

        if (!dev_manager_info(lv->vg->cmd->mem, lv, layer, with_open_count,
                              with_read_ahead, &dminfo, &info->read_ahead))
                return_0;

        info->exists = dminfo.exists;
        info->suspended = dminfo.suspended;
        info->open_count = dminfo.open_count;
        info->major = dminfo.major;
        info->minor = dminfo.minor;
        info->read_only = dminfo.read_only;
        info->live_table = dminfo.live_table;
        info->inactive_table = dminfo.inactive_table;

        return 1;
}

int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
                    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        int r;
        struct logical_volume *lv;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                return 0;

        r = lv_info(cmd, lv, use_layer, info, with_open_count, with_read_ahead);
        release_vg(lv->vg);

        return r;
}

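/*
 * Refuse to treat an LV as unused if it is open, has sysfs holders, or
 * contains a mounted filesystem; without sysfs only open_count is checked.
 */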
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
                        struct logical_volume *lv, struct lvinfo *info)
{
        if (!info->exists)
                return 1;

        /* If sysfs is not used, use open_count information only. */
        if (!*dm_sysfs_dir()) {
                if (info->open_count) {
                        log_error("Logical volume %s/%s in use.",
                                  lv->vg->name, lv->name);
                        return 0;
                }

                return 1;
        }

        if (dm_device_has_holders(info->major, info->minor)) {
                log_error("Logical volume %s/%s is used by another device.",
                          lv->vg->name, lv->name);
                return 0;
        }

        if (dm_device_has_mounted_fs(info->major, info->minor)) {
                log_error("Logical volume %s/%s contains a filesystem in use.",
                          lv->vg->name, lv->name);
                return 0;
        }

        return 1;
}

/*
 * Refreshes the LV's transient (kernel) status.
 * Returns 1 if the status was updated, else 0 on failure.
 */
int lv_check_transient(struct logical_volume *lv)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_transient(dm, lv)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/* FIXME Merge with snapshot_percent */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
                      int wait, percent_t *percent, uint32_t *event_nr)
{
        int r;
        struct dev_manager *dm;
        struct lvinfo info;
        /* If a mirrored LV is temporarily shrunk to 1 area (= linear),
         * it should be considered in-sync. */
        if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
                *percent = PERCENT_100;
                return 1;
        }

        if (!activation())
                return 0;

        log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

        if (!lv_info(cmd, lv, 0, &info, 0, 0))
                return_0;

        if (!info.exists)
                return 0;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
        return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}

/*
 * Returns data or metadata percent usage, depending on the metadata
 * argument (0 = data, 1 = metadata).
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
                         percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking thin %sdata percent for LV %s/%s",
                  (metadata) ? "meta" : "", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_percent(const struct logical_volume *lv,
                    int mapped, percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking thin percent for LV %s/%s",
                  lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}

/*
 * Returns 1 if transaction_id set, else 0 on failure.
 */
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
                                uint64_t *transaction_id)
{
        int r;
        struct dev_manager *dm;
        struct dm_status_thin_pool *status;

        if (!activation())
                return 0;

        log_debug("Checking thin pool transaction_id for LV %s/%s",
                  lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_pool_status(dm, lv, &status)))
                stack;
        else
                *transaction_id = status->transaction_id;

        dev_manager_destroy(dm);

        return r;
}

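/* Returns 1 if the LV's device exists, 0 if not, or -1 if the query failed. */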
static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
{
        struct lvinfo info;

        if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
                stack;
                return -1;
        }

        return info.exists;
}

static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
        struct lvinfo info;

        if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
                stack;
                return -1;
        }

        return info.open_count;
}

static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
        int r;
        struct dev_manager *dm;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                return_0;

        if (!(r = dev_manager_activate(dm, lv, laopts)))
                stack;

        dev_manager_destroy(dm);
        return r;
}

static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
                       int *flush_required)
{
        int r = 0;
        struct dev_manager *dm;
        int old_readonly = laopts->read_only;

        laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                goto_out;

        if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
                stack;

        dev_manager_destroy(dm);

        laopts->read_only = old_readonly;
out:
        return r;
}

static int _lv_deactivate(struct logical_volume *lv)
{
        int r;
        struct dev_manager *dm;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_deactivate(dm, lv)))
                stack;

        dev_manager_destroy(dm);
        return r;
}

static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
                          int lockfs, int flush_required)
{
        int r;
        struct dev_manager *dm;

        laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

        /*
         * When we are asked to manipulate (normally suspend/resume) the PVMOVE
         * device directly, we don't want to touch the devices that use it.
         */
        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                return_0;

        if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
                stack;

        dev_manager_destroy(dm);
        return r;
}

/*
 * These two functions return the number of visible LVs in the state,
 * or -1 on error.  FIXME Check this.
 */
int lvs_in_vg_activated(struct volume_group *vg)
{
        struct lv_list *lvl;
        int count = 0;

        if (!activation())
                return 0;

        dm_list_iterate_items(lvl, &vg->lvs)
                if (lv_is_visible(lvl->lv))
                        count += (_lv_active(vg->cmd, lvl->lv) == 1);

        log_debug("Counted %d active LVs in VG %s", count, vg->name);

        return count;
}

int lvs_in_vg_opened(const struct volume_group *vg)
{
        const struct lv_list *lvl;
        int count = 0;

        if (!activation())
                return 0;

        dm_list_iterate_items(lvl, &vg->lvs)
                if (lv_is_visible(lvl->lv))
                        count += (_lv_open_count(vg->cmd, lvl->lv) > 0);

        log_debug("Counted %d open LVs in VG %s", count, vg->name);

        return count;
}

/*
 * _lv_is_active
 * @lv:        logical volume being queried
 * @locally:   set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *      return  locally  exclusively  status
 *      ======  =======  ===========  ======
 *         0       0          0       not active
 *         1       0          0       active remotely
 *         1       0          1       exclusive remotely
 *         1       1          0       active locally and possibly remotely
 *         1       1          1       exclusive locally (or local && !cluster)
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(struct logical_volume *lv,
                         int *locally, int *exclusive)
{
        int r, l, e; /* remote, local, and exclusive */

        r = l = e = 0;

        if (_lv_active(lv->vg->cmd, lv))
                l = 1;

        if (!vg_is_clustered(lv->vg)) {
                if (l)
                        e = 1; /* exclusive by definition */
                goto out;
        }

        /* Active locally, and the caller doesn't care about exclusive */
        if (l && !exclusive)
                goto out;

        if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
                goto out;

        /*
         * If lock query is not supported (due to interfacing with old
         * code), then we cannot evaluate exclusivity properly.
         *
         * Old users of this function will never be affected by this,
         * since they are only concerned about active vs. not active.
         * New users of this function who specifically ask for 'exclusive'
         * will be given an error message.
         */
        log_error("Unable to determine exclusivity of %s", lv->name);

        e = 0;

        /*
         * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
         * but it's unreliable.
         */

out:
        if (locally)
                *locally = l;
        if (exclusive)
                *exclusive = e;

        log_very_verbose("%s/%s is %sactive%s%s",
                         lv->vg->name, lv->name,
                         (r || l) ? "" : "not ",
                         (exclusive && e) ? " exclusive" : "",
                         e ? (l ? " locally" : " remotely") : "");

        return r || l;
}

int lv_is_active(struct logical_volume *lv)
{
        return _lv_is_active(lv, NULL, NULL);
}

int lv_is_active_but_not_locally(struct logical_volume *lv)
{
        int l;

        return _lv_is_active(lv, &l, NULL) && !l;
}

int lv_is_active_exclusive(struct logical_volume *lv)
{
        int e;

        return _lv_is_active(lv, NULL, &e) && e;
}

int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
        int l, e;

        return _lv_is_active(lv, &l, &e) && l && e;
}

int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
        int l, e;

        return _lv_is_active(lv, &l, &e) && !l && e;
}

#ifdef DMEVENTD
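/*
 * Build a dmeventd event handler for the given device UUID and monitoring
 * DSO, using the configured dmeventd executable path.
 */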
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
                                                         const int timeout, enum dm_event_mask mask)
{
        struct dm_event_handler *dmevh;

        if (!(dmevh = dm_event_handler_create()))
                return_NULL;

        if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
                goto_bad;

        if (dm_event_handler_set_dso(dmevh, dso))
                goto_bad;

        if (dm_event_handler_set_uuid(dmevh, dmuuid))
                goto_bad;

        dm_event_handler_set_timeout(dmevh, timeout);
        dm_event_handler_set_event_mask(dmevh, mask);

        return dmevh;

bad:
        dm_event_handler_destroy(dmevh);
        return NULL;
}

char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
        char *path;

        if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
                log_error("Failed to allocate dmeventd library path.");
                return NULL;
        }

        get_shared_library_path(cmd, libpath, path, PATH_MAX);

        return path;
}

static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
{
        const char *layer;

        if (lv_is_thin_pool(lv))
                layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
        else if (lv_is_origin(lv))
                layer = "real"; /* Monitor "real" for "snapshot-origin". */
        else
                layer = NULL;

        return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
}

int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
                                    struct logical_volume *lv, int *pending)
{
        char *uuid;
        enum dm_event_mask evmask = 0;
        struct dm_event_handler *dmevh;

        *pending = 0;

        if (!dso)
                return_0;

        if (!(uuid = _build_target_uuid(cmd, lv)))
                return_0;

        if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
                return_0;

        if (dm_event_get_registered_device(dmevh, 0)) {
                dm_event_handler_destroy(dmevh);
                return 0;
        }

        evmask = dm_event_handler_get_event_mask(dmevh);
        if (evmask & DM_EVENT_REGISTRATION_PENDING) {
                *pending = 1;
                evmask &= ~DM_EVENT_REGISTRATION_PENDING;
        }

        dm_event_handler_destroy(dmevh);

        return evmask;
}

int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
                           int evmask __attribute__((unused)), int set, int timeout)
{
        char *uuid;
        struct dm_event_handler *dmevh;
        int r;

        if (!dso)
                return_0;

        /* We always monitor the "real" device, never the "snapshot-origin" itself. */
        if (!(uuid = _build_target_uuid(cmd, lv)))
                return_0;

        if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
                                               DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
                return_0;

        r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

        dm_event_handler_destroy(dmevh);

        if (!r)
                return_0;

        log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

        return 1;
}

#endif

/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
                           const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
        int i, pending = 0, monitored;
        int r = 1;
        struct dm_list *tmp, *snh, *snht;
        struct lv_segment *seg;
        struct lv_segment *log_seg;
        int (*monitor_fn) (struct lv_segment *s, int e);
        uint32_t s;
        static const struct lv_activate_opts zlaopts = { 0 };

        if (!laopts)
                laopts = &zlaopts;

        /* skip dmeventd code altogether */
        if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
                return 1;

        /*
         * Nothing to do if dmeventd configured not to be used.
         */
        if (monitor && !dmeventd_monitor_mode())
                return 1;

        /*
         * In case of a snapshot device, we monitor lv->snapshot->lv,
         * not the actual LV itself.
         */
        if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
                return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

        /*
         * In case this LV is a snapshot origin, we instead monitor
         * each of its respective snapshots.  The origin itself may
         * also need to be monitored if it is a mirror, for example.
         */
        if (!laopts->origin_only && lv_is_origin(lv))
                dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
                        if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
                                    struct lv_segment, origin_list)->cow, NULL, monitor))
                                r = 0;

        /*
         * If the volume is mirrored and its log is also mirrored, monitor
         * the log volume as well.
         */
        if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
            (log_seg = first_seg(seg->log_lv)) != NULL &&
            seg_is_mirrored(log_seg))
                if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
                        r = 0;

        dm_list_iterate(tmp, &lv->segments) {
                seg = dm_list_item(tmp, struct lv_segment);

                /* Recurse for AREA_LV */
                for (s = 0; s < seg->area_count; s++) {
                        if (seg_type(seg, s) != AREA_LV)
                                continue;
                        if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
                                                    monitor)) {
                                log_error("Failed to %smonitor %s",
                                          monitor ? "" : "un",
                                          seg_lv(seg, s)->name);
                                r = 0;
                        }
                }

                if (!seg_monitored(seg) || (seg->status & PVMOVE))
                        continue;

                monitor_fn = NULL;

                /* Check monitoring status */
                if (seg->segtype->ops->target_monitored)
                        monitored = seg->segtype->ops->target_monitored(seg, &pending);
                else
                        continue; /* segtype doesn't support registration */

                /*
                 * FIXME: We should really try again if pending
                 */
                monitored = (pending) ? 0 : monitored;

                if (monitor) {
                        if (monitored)
                                log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
                        else if (seg->segtype->ops->target_monitor_events)
                                monitor_fn = seg->segtype->ops->target_monitor_events;
                } else {
                        if (!monitored)
                                log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
                        else if (seg->segtype->ops->target_unmonitor_events)
                                monitor_fn = seg->segtype->ops->target_unmonitor_events;
                }

                /* Do [un]monitor */
                if (!monitor_fn)
                        continue;

                log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
                            test_mode() ? " [Test mode: skipping this]" : "");

                /* FIXME Test mode should really continue a bit further. */
                if (test_mode())
                        continue;

                /* FIXME specify events */
                if (!monitor_fn(seg, 0)) {
                        log_error("%s/%s: %s segment monitoring function failed.",
                                  lv->vg->name, lv->name, seg->segtype->name);
                        return 0;
                }

                /* Check [un]monitor results */
                /* Try a couple times if pending, but not forever... */
                for (i = 0; i < 10; i++) {
                        pending = 0;
                        monitored = seg->segtype->ops->target_monitored(seg, &pending);
                        if (pending ||
                            (!monitored && monitor) ||
                            (monitored && !monitor))
                                log_very_verbose("%s/%s %smonitoring still pending: waiting...",
                                                 lv->vg->name, lv->name, monitor ? "" : "un");
                        else
                                break;
                        sleep(1);
                }

                if (r)
                        r = (monitored && monitor) || (!monitored && !monitor);
        }

        return r;
#else
        return 1;
#endif
}

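/*
 * Context passed while walking sub-LVs during suspend: LVs still present in
 * the precommitted metadata that have become detached need preloading too.
 */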
struct detached_lv_data {
        struct logical_volume *lv_pre;
        struct lv_activate_opts *laopts;
        int *flush_required;
};

static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
{
        struct detached_lv_data *detached = data;
        struct lv_list *lvl_pre;

        if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
                if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) &&
                    (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
                    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
                        return_0;
        }

        return 1;
}

static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
                       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
        struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
        struct lv_list *lvl_pre;
        struct seg_list *sl;
        struct lv_segment *snap_seg;
        struct lvinfo info;
        int r = 0, lockfs = 0, flush_required = 0;
        struct detached_lv_data detached;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto_out;

        /* Use precommitted metadata if present */
        if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
                goto_out;

        /* Ignore origin_only unless LV is origin in both old and new metadata */
        if (!lv_is_thin_volume(lv) && !(lv_is_origin(lv) && lv_is_origin(lv_pre)))
                laopts->origin_only = 0;

        if (test_mode()) {
                _skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
                r = 1;
                goto out;
        }

        if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
                goto_out;

        if (!info.exists || info.suspended) {
                if (!error_if_not_suspended) {
                        r = 1;
                        if (info.suspended)
                                critical_section_inc(cmd, "already suspended");
                }
                goto out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        /*
         * Preload devices for the LV.
         * If the PVMOVE LV is being removed, it's only present in the old
         * metadata and not the new, so we must explicitly add the new
         * tables for all the changed LVs here, as the relationships
         * are not found by walking the new metadata.
         */
        if (!(lv_pre->status & LOCKED) &&
            (lv->status & LOCKED) &&
            (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
                /* Preload all the LVs above the PVMOVE LV */
                dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
                        if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
                                log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
                                goto out;
                        }
                        if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
                                goto_out;
                }
                /* Now preload the PVMOVE LV itself */
                if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
                        log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
                        goto out;
                }
                if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
                        goto_out;
        } else {
                if (!_lv_preload(lv_pre, laopts, &flush_required))
                        /* FIXME Revert preloading */
                        goto_out;

                /*
                 * Search for existing LVs that have become detached and preload them.
                 */
                detached.lv_pre = lv_pre;
                detached.laopts = laopts;
                detached.flush_required = &flush_required;

                if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
                        goto_out;

                /*
                 * Preload any snapshots that are being removed.
                 */
                if (!laopts->origin_only && lv_is_origin(lv)) {
                        dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
                                if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
                                        log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
                                                  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
                                        goto out;
                                }
                                if (!lv_is_cow(lvl_pre->lv) &&
                                    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
                                        goto_out;
                        }
                }
        }

        if (!monitor_dev_for_events(cmd, lv, laopts, 0))
                /* FIXME Consider aborting here */
                stack;

        critical_section_inc(cmd, "suspending");
        if (pvmove_lv)
                critical_section_inc(cmd, "suspending pvmove LV");

        if (!laopts->origin_only &&
            (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
                lockfs = 1;

        /*
         * Suspending an LV directly above a PVMOVE LV also
         * suspends other LVs using that same PVMOVE LV.
         * FIXME Remove this and delay the 'clear node' until
         * after the code knows whether there's a different
         * inactive table to load or not instead so lv_suspend
         * can be called separately for each LV safely.
         */
        if ((lv_pre->vg->status & PRECOMMITTED) &&
            (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
                if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
                        critical_section_dec(cmd, "failed precommitted suspend");
                        if (pvmove_lv)
                                critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
                        goto_out;
                }
        } else {
                /* Normal suspend */
                if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
                        critical_section_dec(cmd, "failed suspend");
                        if (pvmove_lv)
                                critical_section_dec(cmd, "failed suspend (pvmove)");
                        goto_out;
                }
        }

        r = 1;
out:
        if (lv_pre)
                release_vg(lv_pre->vg);
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}

/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any preloaded tables may then use non-clustered targets.
 *
 * Returns success if the device is not active.
 */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
        struct lv_activate_opts laopts = {
                .origin_only = origin_only,
                .exclusive = exclusive
        };

        return _lv_suspend(cmd, lvid_s, &laopts, 0);
}

/* No longer used */
/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
        return _lv_suspend(cmd, lvid_s, 1);
}
***********/

static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
                      struct lv_activate_opts *laopts, int error_if_not_active)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;
        int messages_only = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto_out;

        if (lv_is_thin_pool(lv) && laopts->origin_only)
                messages_only = 1;

        if (!lv_is_origin(lv))
                laopts->origin_only = 0;

        if (test_mode()) {
                _skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
                      laopts->revert ? " (reverting)" : "");
                r = 1;
                goto out;
        }

        log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
                  error_if_not_active ? "" : " if active",
                  laopts->origin_only ? " without snapshots" : "",
                  laopts->revert ? " (reverting)" : "");

        if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
                goto_out;

        if (!info.exists || !(info.suspended || messages_only)) {
                if (error_if_not_active)
                        goto_out;
                r = 1;
                if (!info.suspended)
                        critical_section_dec(cmd, "already resumed");
                goto out;
        }

        laopts->read_only = _passes_readonly_filter(cmd, lv);

        if (!_lv_activate_lv(lv, laopts))
                goto_out;

        critical_section_dec(cmd, "resumed");

        if (!monitor_dev_for_events(cmd, lv, laopts, 1))
                stack;

        r = 1;
out:
        if (lv)
                release_vg(lv->vg);

        return r;
}

/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any tables loaded may then use non-clustered targets.
 *
 * @origin_only
 * @exclusive   This parameter only has an effect in a cluster context.
 *              It forces a local target type to be used (instead of a
 *              cluster-aware type).
 * Returns success if the device is not active.
 */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
                        unsigned origin_only, unsigned exclusive,
                        unsigned revert)
{
        struct lv_activate_opts laopts = {
                .origin_only = origin_only,
                .exclusive = exclusive,
                .revert = revert
        };

        return _lv_resume(cmd, lvid_s, &laopts, 0);
}

int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
        struct lv_activate_opts laopts = { .origin_only = origin_only, };

        return _lv_resume(cmd, lvid_s, &laopts, 1);
}

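/*
 * Returns 1 if any snapshot of the origin is open (or its state could not
 * be queried), in which case the origin must not be deactivated.
 */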
static int _lv_has_open_snapshots(struct logical_volume *lv)
{
        struct lv_segment *snap_seg;
        struct lvinfo info;
        int r = 0;

        dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
                if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
                        r = 1;
                        continue;
                }

                if (info.exists && info.open_count) {
                        log_error("LV %s/%s has open snapshot %s: "
                                  "not deactivating", lv->vg->name, lv->name,
                                  snap_seg->cow->name);
                        r = 1;
                }
        }

        return r;
}

int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (test_mode()) {
                _skip("Deactivating '%s'.", lv->name);
                r = 1;
                goto out;
        }

        log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

        if (!lv_info(cmd, lv, 0, &info, 1, 0))
                goto_out;

        if (!info.exists) {
                r = 1;
                goto out;
        }

        if (lv_is_visible(lv)) {
                if (!lv_check_not_in_use(cmd, lv, &info))
                        goto_out;

                if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
                        goto_out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        if (!monitor_dev_for_events(cmd, lv, NULL, 0))
                stack;

        critical_section_inc(cmd, "deactivating");
        r = _lv_deactivate(lv);
        critical_section_dec(cmd, "deactivated");

        if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
                r = 0;
out:
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}

/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
                         int *activate_lv)
{
        struct logical_volume *lv;
        int r = 0;

        if (!activation()) {
                *activate_lv = 1;
                return 1;
        }

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (!_passes_activation_filter(cmd, lv)) {
                log_verbose("Not activating %s/%s since it does not pass "
                            "activation filter.", lv->vg->name, lv->name);
                *activate_lv = 0;
        } else
                *activate_lv = 1;
        r = 1;
out:
        if (lv)
                release_vg(lv->vg);

        return r;
}

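/*
 * Common activation path: apply the activation and read-only filters when
 * requested, refuse partial or unrecognised LVs, then load the tables and
 * start monitoring.
 */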
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
                        struct lv_activate_opts *laopts, int filter)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (filter && !_passes_activation_filter(cmd, lv)) {
                log_error("Not activating %s/%s since it does not pass "
                          "activation filter.", lv->vg->name, lv->name);
                goto out;
        }

        if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
                log_error("Refusing activation of partial LV %s. Use --partial to override.",
                          lv->name);
                goto_out;
        }

        if (lv_has_unknown_segments(lv)) {
                log_error("Refusing activation of LV %s containing "
                          "an unrecognised segment.", lv->name);
                goto_out;
        }

        if (test_mode()) {
                _skip("Activating '%s'.", lv->name);
                r = 1;
                goto out;
        }

        if (filter)
                laopts->read_only = _passes_readonly_filter(cmd, lv);

        log_debug("Activating %s/%s%s%s.", lv->vg->name, lv->name,
                  laopts->exclusive ? " exclusively" : "",
                  laopts->read_only ? " read-only" : "");

        if (!lv_info(cmd, lv, 0, &info, 0, 0))
                goto_out;

        /*
         * Nothing to do?
         */
        if (info.exists && !info.suspended && info.live_table &&
            (info.read_only == read_only_lv(lv, laopts))) {
                r = 1;
                goto out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        critical_section_inc(cmd, "activating");
        if (!(r = _lv_activate_lv(lv, laopts)))
                stack;
        critical_section_dec(cmd, "activated");

        if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
                stack;

out:
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}

/* Activate LV */
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        struct lv_activate_opts laopts = { .exclusive = exclusive };

        if (!_lv_activate(cmd, lvid_s, &laopts, 0))
                return_0;

        return 1;
}

/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        struct lv_activate_opts laopts = { .exclusive = exclusive };

        if (!_lv_activate(cmd, lvid_s, &laopts, 1))
                return_0;

        return 1;
}

int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
        int r = 1;

        if (!lv) {
                r = dm_mknodes(NULL);
                fs_unlock();
                return r;
        }

        if (!activation())
                return 1;

        r = dev_manager_mknodes(lv);

        fs_unlock();

        return r;
}

/*
 * Does PV use VG somewhere in its construction?
 * Returns 1 on failure.
 */
int pv_uses_vg(struct physical_volume *pv,
               struct volume_group *vg)
{
        if (!activation())
                return 0;

        if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
                return 0;

        return dev_manager_device_uses_vg(pv->dev, vg);
}

void activation_release(void)
{
        dev_manager_release();
}

void activation_exit(void)
{
        dev_manager_exit();
}
#endif