sourceware.org Git - lvm2.git/blob - lib/activate/activate.c
Fix locking query compatibility with old external locking libraries.
1 /*
2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
4 *
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
10 *
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 */
15
16 #include "lib.h"
17 #include "metadata.h"
18 #include "activate.h"
19 #include "memlock.h"
20 #include "display.h"
21 #include "fs.h"
22 #include "lvm-exec.h"
23 #include "lvm-file.h"
24 #include "lvm-string.h"
25 #include "toolcontext.h"
26 #include "dev_manager.h"
27 #include "str_list.h"
28 #include "config.h"
29 #include "filter.h"
30 #include "segtype.h"
31
32 #include <limits.h>
33 #include <fcntl.h>
34 #include <unistd.h>
35
36 #define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
37
38 int lvm1_present(struct cmd_context *cmd)
39 {
40 char path[PATH_MAX];
41
42 if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
43 < 0) {
44 log_error("LVM1 proc global snprintf failed");
45 return 0;
46 }
47
48 if (path_exists(path))
49 return 1;
50 else
51 return 0;
52 }
53
54 int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
55 struct dm_list *modules)
56 {
57 unsigned int s;
58 struct lv_segment *seg2, *snap_seg;
59 struct dm_list *snh;
60
61 if (seg->segtype->ops->modules_needed &&
62 !seg->segtype->ops->modules_needed(mem, seg, modules)) {
63 log_error("module string allocation failed");
64 return 0;
65 }
66
67 if (lv_is_origin(seg->lv))
68 dm_list_iterate(snh, &seg->lv->snapshot_segs)
69 if (!list_lv_modules(mem,
70 dm_list_struct_base(snh,
71 struct lv_segment,
72 origin_list)->cow,
73 modules))
74 return_0;
75
76 if (lv_is_cow(seg->lv)) {
77 snap_seg = find_cow(seg->lv);
78 if (snap_seg->segtype->ops->modules_needed &&
79 !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
80 modules)) {
81 log_error("snap_seg module string allocation failed");
82 return 0;
83 }
84 }
85
86 for (s = 0; s < seg->area_count; s++) {
87 switch (seg_type(seg, s)) {
88 case AREA_LV:
89 seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
90 if (seg2 && !list_segment_modules(mem, seg2, modules))
91 return_0;
92 break;
93 case AREA_PV:
94 case AREA_UNASSIGNED:
95 ;
96 }
97 }
98
99 return 1;
100 }
101
102 int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
103 struct dm_list *modules)
104 {
105 struct lv_segment *seg;
106
107 dm_list_iterate_items(seg, &lv->segments)
108 if (!list_segment_modules(mem, seg, modules))
109 return_0;
110
111 return 1;
112 }
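/*
 * Illustrative sketch (not part of this file, never compiled): a caller
 * could use list_lv_modules() roughly like this to report the kernel
 * modules an LV needs before activation.  The helper name is hypothetical;
 * "cmd" and "lv" are assumed to be valid objects from the calling tool.
 */
#if 0
static void _print_needed_modules(struct cmd_context *cmd,
				  const struct logical_volume *lv)
{
	struct dm_list *modules;
	struct str_list *sl;

	/* str_list_create() allocates the list head from the command pool */
	if (!(modules = str_list_create(cmd->mem)))
		return;

	if (list_lv_modules(cmd->mem, lv, modules))
		dm_list_iterate_items(sl, modules)
			log_print("LV %s needs module %s", lv->name, sl->str);
}
#endif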
113
114 #ifndef DEVMAPPER_SUPPORT
115 void set_activation(int act)
116 {
117 static int warned = 0;
118
119 if (warned || !act)
120 return;
121
122 log_error("Compiled without libdevmapper support. "
123 "Can't enable activation.");
124
125 warned = 1;
126 }
127 int activation(void)
128 {
129 return 0;
130 }
131 int library_version(char *version, size_t size)
132 {
133 return 0;
134 }
135 int driver_version(char *version, size_t size)
136 {
137 return 0;
138 }
139 int target_version(const char *target_name, uint32_t *maj,
140 uint32_t *min, uint32_t *patchlevel)
141 {
142 return 0;
143 }
144 int target_present(struct cmd_context *cmd, const char *target_name,
145 int use_modprobe)
146 {
147 return 0;
148 }
149 int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, struct lvinfo *info,
150 int with_open_count, int with_read_ahead)
151 {
152 return 0;
153 }
154 int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
155 struct lvinfo *info, int with_open_count, int with_read_ahead)
156 {
157 return 0;
158 }
159 int lv_snapshot_percent(const struct logical_volume *lv, float *percent)
160 {
161 return 0;
162 }
163 int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
164 int wait, float *percent, uint32_t *event_nr)
165 {
166 return 0;
167 }
168 int lvs_in_vg_activated(struct volume_group *vg)
169 {
170 return 0;
171 }
172 int lvs_in_vg_activated_by_uuid_only(struct volume_group *vg)
173 {
174 return 0;
175 }
176 int lvs_in_vg_opened(const struct volume_group *vg)
177 {
178 return 0;
179 }
180 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
181 {
182 return 1;
183 }
184 int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s)
185 {
186 return 1;
187 }
188 int lv_resume(struct cmd_context *cmd, const char *lvid_s)
189 {
190 return 1;
191 }
192 int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s)
193 {
194 return 1;
195 }
196 int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
197 {
198 return 1;
199 }
200 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
201 int *activate_lv)
202 {
203 return 1;
204 }
205 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
206 {
207 return 1;
208 }
209 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
210 {
211 return 1;
212 }
213
214 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
215 {
216 return 1;
217 }
218
219 int pv_uses_vg(struct physical_volume *pv,
220 struct volume_group *vg)
221 {
222 return 0;
223 }
224
225 void activation_release(void)
226 {
227 return;
228 }
229
230 void activation_exit(void)
231 {
232 return;
233 }
234
235 #else /* DEVMAPPER_SUPPORT */
236
237 static int _activation = 1;
238
239 void set_activation(int act)
240 {
241 if (act == _activation)
242 return;
243
244 _activation = act;
245 if (_activation)
246 log_verbose("Activation enabled. Device-mapper kernel "
247 "driver will be used.");
248 else
249 log_warn("WARNING: Activation disabled. No device-mapper "
250 "interaction will be attempted.");
251 }
252
253 int activation(void)
254 {
255 return _activation;
256 }
257
258 static int _passes_activation_filter(struct cmd_context *cmd,
259 struct logical_volume *lv)
260 {
261 const struct config_node *cn;
262 struct config_value *cv;
263 char *str;
264 char path[PATH_MAX];
265
266 if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
267 /* If no host tags defined, activate */
268 if (dm_list_empty(&cmd->tags))
269 return 1;
270
271 /* If any host tag matches any LV or VG tag, activate */
272 if (str_list_match_list(&cmd->tags, &lv->tags) ||
273 str_list_match_list(&cmd->tags, &lv->vg->tags))
274 return 1;
275
276 /* Don't activate */
277 return 0;
278 }
279
280 for (cv = cn->v; cv; cv = cv->next) {
281 if (cv->type != CFG_STRING) {
282 log_error("Ignoring invalid non-string entry in config file "
283 "activation/volume_list");
284 continue;
285 }
286 str = cv->v.str;
287 if (!*str) {
288 log_error("Ignoring empty string in config file "
289 "activation/volume_list");
290 continue;
291 }
292
293 /* Tag? */
294 if (*str == '@') {
295 str++;
296 if (!*str) {
297 log_error("Ignoring empty tag in config file "
298 "activation/volume_list");
299 continue;
300 }
301 /* If any host tag matches any LV or VG tag, activate */
302 if (!strcmp(str, "*")) {
303 if (str_list_match_list(&cmd->tags, &lv->tags)
304 || str_list_match_list(&cmd->tags,
305 &lv->vg->tags))
306 return 1;
307 else
308 continue;
309 }
310 /* If supplied tag matches LV or VG tag, activate */
311 if (str_list_match_item(&lv->tags, str) ||
312 str_list_match_item(&lv->vg->tags, str))
313 return 1;
314 else
315 continue;
316 }
317 if (!strchr(str, '/')) {
318 /* vgname supplied */
319 if (!strcmp(str, lv->vg->name))
320 return 1;
321 else
322 continue;
323 }
324 /* vgname/lvname */
325 if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
326 lv->name) < 0) {
327 log_error("dm_snprintf error from %s/%s", lv->vg->name,
328 lv->name);
329 continue;
330 }
331 if (!strcmp(path, str))
332 return 1;
333 }
334
335 return 0;
336 }
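/*
 * For reference, an illustrative lvm.conf fragment of the kind this filter
 * parses (the names and tags below are examples only):
 *
 *	activation {
 *		volume_list = [ "vg0", "vg1/lvol1", "@backup", "@*" ]
 *	}
 *
 * "vg0" matches every LV in that VG, "vg1/lvol1" matches one specific LV,
 * "@backup" matches any LV or VG carrying the tag "backup", and "@*"
 * matches any LV or VG that shares a tag with this host's tags.
 */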
337
338 int library_version(char *version, size_t size)
339 {
340 if (!activation())
341 return 0;
342
343 return dm_get_library_version(version, size);
344 }
345
346 int driver_version(char *version, size_t size)
347 {
348 if (!activation())
349 return 0;
350
351 log_very_verbose("Getting driver version");
352
353 return dm_driver_version(version, size);
354 }
355
356 int target_version(const char *target_name, uint32_t *maj,
357 uint32_t *min, uint32_t *patchlevel)
358 {
359 int r = 0;
360 struct dm_task *dmt;
361 struct dm_versions *target, *last_target;
362
363 log_very_verbose("Getting target version for %s", target_name);
364 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
365 return_0;
366
367 if (!dm_task_run(dmt)) {
368 log_debug("Failed to get %s target version", target_name);
369 /* Assume this was because LIST_VERSIONS isn't supported */
370 r = 1; goto out;	/* still destroy dmt */
371 }
372
373 target = dm_task_get_versions(dmt);
374
375 do {
376 last_target = target;
377
378 if (!strcmp(target_name, target->name)) {
379 r = 1;
380 *maj = target->version[0];
381 *min = target->version[1];
382 *patchlevel = target->version[2];
383 goto out;
384 }
385
386 target = (void *) target + target->next;
387 } while (last_target != target);
388
389 out:
390 dm_task_destroy(dmt);
391
392 return r;
393 }
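/*
 * Illustrative sketch (not compiled): callers typically use target_version()
 * to gate behaviour on the kernel target level.  The "mirror" target name,
 * the 1.12 threshold and the helper name are hypothetical example values.
 */
#if 0
static int _mirror_target_at_least_1_12(void)
{
	uint32_t maj, min, patchlevel;

	if (!target_version("mirror", &maj, &min, &patchlevel))
		return 0;

	return maj > 1 || (maj == 1 && min >= 12);
}
#endif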
394
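/* Try to load the dm-<target_name> kernel module by running MODPROBE_CMD;
 * a no-op returning 0 when modprobe support is not compiled in. */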
395 int module_present(struct cmd_context *cmd, const char *target_name)
396 {
397 int ret = 0;
398 #ifdef MODPROBE_CMD
399 char module[128];
400 const char *argv[3];
401
402 if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
403 log_error("module_present module name too long: %s",
404 target_name);
405 return 0;
406 }
407
408 argv[0] = MODPROBE_CMD;
409 argv[1] = module;
410 argv[2] = NULL;
411
412 ret = exec_cmd(cmd, argv);
413 #endif
414 return ret;
415 }
416
417 int target_present(struct cmd_context *cmd, const char *target_name,
418 int use_modprobe)
419 {
420 uint32_t maj, min, patchlevel;
421
422 if (!activation())
423 return 0;
424
425 #ifdef MODPROBE_CMD
426 if (use_modprobe) {
427 if (target_version(target_name, &maj, &min, &patchlevel))
428 return 1;
429
430 if (!module_present(cmd, target_name))
431 return_0;
432 }
433 #endif
434
435 return target_version(target_name, &maj, &min, &patchlevel);
436 }
437
438 /*
439 * Returns 1 if info structure populated, else 0 on failure.
440 */
441 static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int with_mknodes,
442 struct lvinfo *info, int with_open_count, int with_read_ahead, unsigned by_uuid_only)
443 {
444 struct dm_info dminfo;
445 char *name = NULL;
446
447 if (!activation())
448 return 0;
449
450 if (!by_uuid_only &&
451 !(name = build_dm_name(cmd->mem, lv->vg->name, lv->name, NULL)))
452 return_0;
453
454 log_debug("Getting device info for %s", name ? name : lv->name);
455 if (!dev_manager_info(lv->vg->cmd->mem, name, lv, with_mknodes,
456 with_open_count, with_read_ahead, &dminfo,
457 &info->read_ahead)) {
458 if (name)
459 dm_pool_free(cmd->mem, name);
460 return_0;
461 }
462
463 info->exists = dminfo.exists;
464 info->suspended = dminfo.suspended;
465 info->open_count = dminfo.open_count;
466 info->major = dminfo.major;
467 info->minor = dminfo.minor;
468 info->read_only = dminfo.read_only;
469 info->live_table = dminfo.live_table;
470 info->inactive_table = dminfo.inactive_table;
471
472 /*
473 * Cache read ahead value for PV devices now (before possible suspend)
474 */
475 (void)lv_calculate_readhead(lv);
476
477 if (name)
478 dm_pool_free(cmd->mem, name);
479
480 return 1;
481 }
482
483 int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, struct lvinfo *info,
484 int with_open_count, int with_read_ahead)
485 {
486 return _lv_info(cmd, lv, 0, info, with_open_count, with_read_ahead, 0);
487 }
488
489 int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
490 struct lvinfo *info, int with_open_count, int with_read_ahead)
491 {
492 struct logical_volume *lv;
493
494 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
495 return 0;
496
497 return _lv_info(cmd, lv, 0, info, with_open_count, with_read_ahead, 0);
498 }
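/*
 * Illustrative sketch (not compiled): most callers only need to know
 * whether the LV has a live device-mapper device and whether it is held
 * open.  The helper name is hypothetical.
 */
#if 0
static int _lv_is_busy(struct cmd_context *cmd, const struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, &info, 1, 0))	/* with_open_count = 1 */
		return 0;

	return info.exists && info.open_count > 0;
}
#endif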
499
500 /*
501 * Returns 1 if percent set, else 0 on failure.
502 */
503 int lv_snapshot_percent(const struct logical_volume *lv, float *percent)
504 {
505 int r;
506 struct dev_manager *dm;
507
508 if (!activation())
509 return 0;
510
511 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
512 return_0;
513
514 if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
515 stack;
516
517 dev_manager_destroy(dm);
518
519 return r;
520 }
521
522 /* FIXME Merge with snapshot_percent */
523 int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
524 int wait, float *percent, uint32_t *event_nr)
525 {
526 int r;
527 struct dev_manager *dm;
528 struct lvinfo info;
529
530 /* If a mirrored LV is temporarily shrunk to 1 area (= linear),
531 * it should be considered in-sync. */
532 if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
533 *percent = 100.0;
534 return 1;
535 }
536
537 if (!activation())
538 return 0;
539
540 if (!lv_info(cmd, lv, &info, 0, 0))
541 return_0;
542
543 if (!info.exists)
544 return 0;
545
546 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
547 return_0;
548
549 if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
550 stack;
551
552 dev_manager_destroy(dm);
553
554 return r;
555 }
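/*
 * Illustrative sketch (not compiled): mirror polling code calls
 * lv_mirror_percent() repeatedly, here with wait = 1, until the copy is
 * complete.  Greatly simplified - the real loop lives in the tools'
 * polldaemon code and also copes with the LV disappearing.
 */
#if 0
static int _wait_for_mirror_sync(struct cmd_context *cmd,
				 struct logical_volume *lv)
{
	float percent = 0;
	uint32_t event_nr = 0;

	while (percent < 100.0) {
		if (!lv_mirror_percent(cmd, lv, 1, &percent, &event_nr))
			return_0;
		log_verbose("Mirror %s is %.2f%% in sync", lv->name, percent);
	}

	return 1;
}
#endif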
556
557 static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv,
558 unsigned by_uuid_only)
559 {
560 struct lvinfo info;
561
562 if (!_lv_info(cmd, lv, 0, &info, 0, 0, by_uuid_only)) {
563 stack;
564 return -1;
565 }
566
567 return info.exists;
568 }
569
570 static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
571 {
572 struct lvinfo info;
573
574 if (!lv_info(cmd, lv, &info, 1, 0)) {
575 stack;
576 return -1;
577 }
578
579 return info.open_count;
580 }
581
582 static int _lv_activate_lv(struct logical_volume *lv)
583 {
584 int r;
585 struct dev_manager *dm;
586
587 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
588 return_0;
589
590 if (!(r = dev_manager_activate(dm, lv)))
591 stack;
592
593 dev_manager_destroy(dm);
594 return r;
595 }
596
597 static int _lv_preload(struct logical_volume *lv, int *flush_required)
598 {
599 int r;
600 struct dev_manager *dm;
601
602 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
603 return_0;
604
605 if (!(r = dev_manager_preload(dm, lv, flush_required)))
606 stack;
607
608 dev_manager_destroy(dm);
609 return r;
610 }
611
612 static int _lv_deactivate(struct logical_volume *lv)
613 {
614 int r;
615 struct dev_manager *dm;
616
617 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
618 return_0;
619
620 if (!(r = dev_manager_deactivate(dm, lv)))
621 stack;
622
623 dev_manager_destroy(dm);
624 return r;
625 }
626
627 static int _lv_suspend_lv(struct logical_volume *lv, int lockfs, int flush_required)
628 {
629 int r;
630 struct dev_manager *dm;
631
632 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
633 return_0;
634
635 if (!(r = dev_manager_suspend(dm, lv, lockfs, flush_required)))
636 stack;
637
638 dev_manager_destroy(dm);
639 return r;
640 }
641
642 /*
643 * Count the number of visible LVs in the VG that are currently active
644 * (or, for lvs_in_vg_opened(), open).
645 */
646 static int _lvs_in_vg_activated(struct volume_group *vg, unsigned by_uuid_only)
647 {
648 struct lv_list *lvl;
649 int count = 0;
650
651 if (!activation())
652 return 0;
653
654 dm_list_iterate_items(lvl, &vg->lvs) {
655 if (lv_is_visible(lvl->lv))
656 count += (_lv_active(vg->cmd, lvl->lv, by_uuid_only) == 1);
657 }
658
659 return count;
660 }
661
662 int lvs_in_vg_activated_by_uuid_only(struct volume_group *vg)
663 {
664 return _lvs_in_vg_activated(vg, 1);
665 }
666
667 int lvs_in_vg_activated(struct volume_group *vg)
668 {
669 return _lvs_in_vg_activated(vg, 0);
670 }
671
672 int lvs_in_vg_opened(const struct volume_group *vg)
673 {
674 const struct lv_list *lvl;
675 int count = 0;
676
677 if (!activation())
678 return 0;
679
680 dm_list_iterate_items(lvl, &vg->lvs) {
681 if (lv_is_visible(lvl->lv))
682 count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
683 }
684
685 return count;
686 }
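/*
 * Illustrative sketch (not compiled): vgchange-style code checks for open
 * LVs before deactivating a whole VG.  The helper name is hypothetical;
 * deactivate_lv() is the locking wrapper the rest of this file also uses.
 */
#if 0
static int _deactivate_vg_lvs(struct cmd_context *cmd, struct volume_group *vg)
{
	struct lv_list *lvl;

	if (lvs_in_vg_opened(vg)) {
		log_error("Volume group \"%s\" has open logical volumes",
			  vg->name);
		return 0;
	}

	dm_list_iterate_items(lvl, &vg->lvs)
		if (!deactivate_lv(cmd, lvl->lv))
			return_0;

	return 1;
}
#endif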
687
688 /*
689 * Determine whether an LV is active locally or in a cluster.
690 * Assumes vg lock held.
691 * Returns:
692 * 0 - not active locally or on any node in cluster
693 * 1 - active either locally or on some node in the cluster
694 */
695 int lv_is_active(struct logical_volume *lv)
696 {
697 int ret;
698
699 if (_lv_active(lv->vg->cmd, lv, 0))
700 return 1;
701
702 if (!vg_is_clustered(lv->vg))
703 return 0;
704
705 if ((ret = remote_lock_held(lv->lvid.s)) >= 0)
706 return ret;
707
708 /*
709 * Compatibility fallback for old external locking libraries without lock query support.
710 * FIXME: check status so we don't deactivate an already-active device.
711 */
712 if (activate_lv_excl(lv->vg->cmd, lv)) {
713 deactivate_lv(lv->vg->cmd, lv);
714 return 0;
715 }
716
717 /*
718 * Exclusive local activation failed so assume it is active elsewhere.
719 */
720 return 1;
721 }
722
723 /*
724 * Returns 0 if an attempt to (un)monitor the device failed.
725 * Returns 1 otherwise.
726 */
727 int monitor_dev_for_events(struct cmd_context *cmd,
728 struct logical_volume *lv, int monitor)
729 {
730 #ifdef DMEVENTD
731 int i, pending = 0, monitored;
732 int r = 1;
733 struct dm_list *tmp, *snh, *snht;
734 struct lv_segment *seg;
735 int (*monitor_fn) (struct lv_segment *s, int e);
736 uint32_t s;
737
738 /* skip dmeventd code altogether */
739 if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
740 return 1;
741
742 /*
743 * Nothing to do if dmeventd is configured not to be used.
744 */
745 if (monitor && !dmeventd_monitor_mode())
746 return 1;
747
748 /*
749 * In case of a snapshot device, we monitor lv->snapshot->lv,
750 * not the actual LV itself.
751 */
752 if (lv_is_cow(lv))
753 return monitor_dev_for_events(cmd, lv->snapshot->lv, monitor);
754
755 /*
756 * In case this LV is a snapshot origin, we instead monitor
757 * each of its respective snapshots (the origin itself does
758 * not need to be monitored).
759 *
760 * TODO: This may change when snapshots of mirrors are allowed.
761 */
762 if (lv_is_origin(lv)) {
763 dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
764 if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
765 struct lv_segment, origin_list)->cow, monitor))
766 r = 0;
767 return r;
768 }
769
770 dm_list_iterate(tmp, &lv->segments) {
771 seg = dm_list_item(tmp, struct lv_segment);
772
773 /* Recurse for AREA_LV */
774 for (s = 0; s < seg->area_count; s++) {
775 if (seg_type(seg, s) != AREA_LV)
776 continue;
777 if (!monitor_dev_for_events(cmd, seg_lv(seg, s),
778 monitor)) {
779 log_error("Failed to %smonitor %s",
780 monitor ? "" : "un",
781 seg_lv(seg, s)->name);
782 r = 0;
783 }
784 }
785
786 if (!seg_monitored(seg) || (seg->status & PVMOVE))
787 continue;
788
789 monitor_fn = NULL;
790
791 /* Check monitoring status */
792 if (seg->segtype->ops->target_monitored)
793 monitored = seg->segtype->ops->target_monitored(seg, &pending);
794 else
795 continue; /* segtype doesn't support registration */
796
797 /*
798 * FIXME: We should really try again if pending
799 */
800 monitored = (pending) ? 0 : monitored;
801
802 if (monitor) {
803 if (monitored)
804 log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
805 else if (seg->segtype->ops->target_monitor_events)
806 monitor_fn = seg->segtype->ops->target_monitor_events;
807 } else {
808 if (!monitored)
809 log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
810 else if (seg->segtype->ops->target_unmonitor_events)
811 monitor_fn = seg->segtype->ops->target_unmonitor_events;
812 }
813
814 /* Do [un]monitor */
815 if (!monitor_fn)
816 continue;
817
818 log_verbose("%sonitoring %s/%s", monitor ? "M" : "Not m", lv->vg->name, lv->name);
819
820 /* FIXME specify events */
821 if (!monitor_fn(seg, 0)) {
822 log_error("%s/%s: %s segment monitoring function failed.",
823 lv->vg->name, lv->name, seg->segtype->name);
824 return 0;
825 }
826
827 /* Check [un]monitor results */
828 /* Try a couple times if pending, but not forever... */
829 for (i = 0; i < 10; i++) {
830 pending = 0;
831 monitored = seg->segtype->ops->target_monitored(seg, &pending);
832 if (pending ||
833 (!monitored && monitor) ||
834 (monitored && !monitor))
835 log_very_verbose("%s/%s %smonitoring still pending: waiting...",
836 lv->vg->name, lv->name, monitor ? "" : "un");
837 else
838 break;
839 sleep(1);
840 }
841
842 r = (monitored && monitor) || (!monitored && !monitor);
843 }
844
845 return r;
846 #else
847 return 1;
848 #endif
849 }
850
851 static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
852 int error_if_not_suspended)
853 {
854 struct logical_volume *lv = NULL, *lv_pre = NULL;
855 struct lvinfo info;
856 int r = 0, lockfs = 0, flush_required = 0;
857
858 if (!activation())
859 return 1;
860
861 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
862 goto_out;
863
864 /* Use precommitted metadata if present */
865 if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
866 goto_out;
867
868 if (test_mode()) {
869 _skip("Suspending '%s'.", lv->name);
870 r = 1;
871 goto out;
872 }
873
874 if (!lv_info(cmd, lv, &info, 0, 0))
875 goto_out;
876
877 if (!info.exists || info.suspended) {
878 r = error_if_not_suspended ? 0 : 1;
879 goto out;
880 }
881
882 /* If VG was precommitted, preload devices for the LV */
883 if ((lv_pre->vg->status & PRECOMMITTED)) {
884 if (!_lv_preload(lv_pre, &flush_required)) {
885 /* FIXME Revert preloading */
886 goto_out;
887 }
888 }
889
890 if (!monitor_dev_for_events(cmd, lv, 0))
891 /* FIXME Consider aborting here */
892 stack;
893
894 memlock_inc();
895
896 if (lv_is_origin(lv_pre) || lv_is_cow(lv_pre))
897 lockfs = 1;
898
899 if (!_lv_suspend_lv(lv, lockfs, flush_required)) {
900 memlock_dec();
901 fs_unlock();
902 goto out;
903 }
904
905 r = 1;
906 out:
907 if (lv_pre)
908 vg_release(lv_pre->vg);
909 if (lv)
910 vg_release(lv->vg);
911
912 return r;
913 }
914
915 /* Returns success if the device is not active */
916 int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s)
917 {
918 return _lv_suspend(cmd, lvid_s, 0);
919 }
920
921 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
922 {
923 return _lv_suspend(cmd, lvid_s, 1);
924 }
925
926 static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
927 int error_if_not_active)
928 {
929 struct logical_volume *lv;
930 struct lvinfo info;
931 int r = 0;
932
933 if (!activation())
934 return 1;
935
936 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
937 goto out;
938
939 if (test_mode()) {
940 _skip("Resuming '%s'.", lv->name);
941 r = 1;
942 goto out;
943 }
944
945 if (!lv_info(cmd, lv, &info, 0, 0))
946 goto_out;
947
948 if (!info.exists || !info.suspended) {
949 r = error_if_not_active ? 0 : 1;
950 goto out;
951 }
952
953 if (!_lv_activate_lv(lv))
954 goto out;
955
956 memlock_dec();
957 fs_unlock();
958
959 if (!monitor_dev_for_events(cmd, lv, 1))
960 stack;
961
962 r = 1;
963 out:
964 if (lv)
965 vg_release(lv->vg);
966
967 return r;
968 }
969
970 /* Returns success if the device is not active */
971 int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s)
972 {
973 return _lv_resume(cmd, lvid_s, 0);
974 }
975
976 int lv_resume(struct cmd_context *cmd, const char *lvid_s)
977 {
978 return _lv_resume(cmd, lvid_s, 1);
979 }
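/*
 * Illustrative sketch (not compiled) of the usual ordering around a
 * metadata change: write the new metadata (precommit), suspend the LV so
 * the precommitted table gets preloaded, commit, then resume.  Greatly
 * simplified - the real callers sit behind the locking layer and also
 * handle error paths and metadata backups.  The helper name is hypothetical.
 */
#if 0
static int _commit_and_refresh_lv(struct cmd_context *cmd,
				  struct logical_volume *lv)
{
	if (!vg_write(lv->vg))
		return_0;

	if (!lv_suspend_if_active(cmd, lv->lvid.s))
		return_0;

	if (!vg_commit(lv->vg)) {
		/* Resume against the old, still-committed metadata */
		lv_resume_if_active(cmd, lv->lvid.s);
		return 0;
	}

	return lv_resume_if_active(cmd, lv->lvid.s);
}
#endif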
980
981 int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
982 {
983 struct logical_volume *lv;
984 struct lvinfo info;
985 int r = 0;
986
987 if (!activation())
988 return 1;
989
990 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
991 goto out;
992
993 if (test_mode()) {
994 _skip("Deactivating '%s'.", lv->name);
995 r = 1;
996 goto out;
997 }
998
999 if (!lv_info(cmd, lv, &info, 1, 0))
1000 goto_out;
1001
1002 if (!info.exists) {
1003 r = 1;
1004 goto out;
1005 }
1006
1007 if (info.open_count && lv_is_visible(lv)) {
1008 log_error("LV %s/%s in use: not deactivating", lv->vg->name,
1009 lv->name);
1010 goto out;
1011 }
1012
1013 if (!monitor_dev_for_events(cmd, lv, 0))
1014 stack;
1015
1016 memlock_inc();
1017 r = _lv_deactivate(lv);
1018 memlock_dec();
1019 fs_unlock();
1020
1021 out:
1022 if (lv)
1023 vg_release(lv->vg);
1024
1025 return r;
1026 }
1027
1028 /* Test if LV passes filter */
1029 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
1030 int *activate_lv)
1031 {
1032 struct logical_volume *lv;
1033 int r = 0;
1034
1035 if (!activation()) {
1036 *activate_lv = 1;
1037 return 1;
1038 }
1039
1040 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1041 goto out;
1042
1043 if (!_passes_activation_filter(cmd, lv)) {
1044 log_verbose("Not activating %s/%s due to config file settings",
1045 lv->vg->name, lv->name);
1046 *activate_lv = 0;
1047 } else
1048 *activate_lv = 1;
1049 r = 1;
1050 out:
1051 if (lv)
1052 vg_release(lv->vg);
1053
1054 return r;
1055 }
1056
1057 static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
1058 int exclusive, int filter)
1059 {
1060 struct logical_volume *lv;
1061 struct lvinfo info;
1062 int r = 0;
1063
1064 if (!activation())
1065 return 1;
1066
1067 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1068 goto out;
1069
1070 if (filter && !_passes_activation_filter(cmd, lv)) {
1071 log_verbose("Not activating %s/%s due to config file settings",
1072 lv->vg->name, lv->name);
1073 goto out;
1074 }
1075
1076 if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
1077 log_error("Refusing activation of partial LV %s. Use --partial to override.",
1078 lv->name);
1079 goto_out;
1080 }
1081
1082 if (test_mode()) {
1083 _skip("Activating '%s'.", lv->name);
1084 r = 1;
1085 goto out;
1086 }
1087
1088 if (!lv_info(cmd, lv, &info, 0, 0))
1089 goto_out;
1090
1091 if (info.exists && !info.suspended && info.live_table) {
1092 r = 1;
1093 goto out;
1094 }
1095
1096 if (exclusive)
1097 lv->status |= ACTIVATE_EXCL;
1098
1099 memlock_inc();
1100 r = _lv_activate_lv(lv);
1101 memlock_dec();
1102 fs_unlock();
1103
1104 if (r && !monitor_dev_for_events(cmd, lv, 1))
1105 stack;
1106
1107 out:
1108 if (lv)
1109 vg_release(lv->vg);
1110
1111 return r;
1112 }
1113
1114 /* Activate LV */
1115 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1116 {
1117 return _lv_activate(cmd, lvid_s, exclusive, 0);
1118 }
1119
1120 /* Activate LV only if it passes filter */
1121 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1122 {
1123 return _lv_activate(cmd, lvid_s, exclusive, 1);
1124 }
1125
1126 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
1127 {
1128 struct lvinfo info;
1129 int r = 1;
1130
1131 if (!lv) {
1132 r = dm_mknodes(NULL);
1133 fs_unlock();
1134 return r;
1135 }
1136
1137 if (!_lv_info(cmd, lv, 1, &info, 0, 0, 0))
1138 return_0;
1139
1140 if (info.exists) {
1141 if (lv_is_visible(lv))
1142 r = dev_manager_lv_mknodes(lv);
1143 } else
1144 r = dev_manager_lv_rmnodes(lv);
1145
1146 fs_unlock();
1147
1148 return r;
1149 }
1150
1151 /*
1152 * Does PV use VG somewhere in its construction?
1153 * Returns 1 if it does, or on failure (erring on the side of caution).
1154 */
1155 int pv_uses_vg(struct physical_volume *pv,
1156 struct volume_group *vg)
1157 {
1158 if (!activation())
1159 return 0;
1160
1161 if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
1162 return 0;
1163
1164 return dev_manager_device_uses_vg(pv->dev, vg);
1165 }
1166
1167 void activation_release(void)
1168 {
1169 dev_manager_release();
1170 }
1171
1172 void activation_exit(void)
1173 {
1174 dev_manager_exit();
1175 }
1176 #endif