sourceware.org Git - lvm2.git/blob - lib/activate/activate.c
Commit: Monitor origin -real device below snapshot instead of overlay device. (brassow)
1 /*
2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
4 *
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
10 *
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 */
15
16 #include "lib.h"
17 #include "metadata.h"
18 #include "activate.h"
19 #include "memlock.h"
20 #include "display.h"
21 #include "fs.h"
22 #include "lvm-exec.h"
23 #include "lvm-file.h"
24 #include "lvm-string.h"
25 #include "toolcontext.h"
26 #include "dev_manager.h"
27 #include "str_list.h"
28 #include "config.h"
29 #include "filter.h"
30 #include "segtype.h"
31 #include "sharedlib.h"
32
33 #include <limits.h>
34 #include <fcntl.h>
35 #include <unistd.h>
36
37 #define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
38
39 int lvm1_present(struct cmd_context *cmd)
40 {
41 char path[PATH_MAX];
42
43 if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
44 < 0) {
45 log_error("LVM1 proc global snprintf failed");
46 return 0;
47 }
48
49 if (path_exists(path))
50 return 1;
51 else
52 return 0;
53 }
54
55 int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
56 struct dm_list *modules)
57 {
58 unsigned int s;
59 struct lv_segment *seg2, *snap_seg;
60 struct dm_list *snh;
61
62 if (seg->segtype->ops->modules_needed &&
63 !seg->segtype->ops->modules_needed(mem, seg, modules)) {
64 log_error("module string allocation failed");
65 return 0;
66 }
67
68 if (lv_is_origin(seg->lv))
69 dm_list_iterate(snh, &seg->lv->snapshot_segs)
70 if (!list_lv_modules(mem,
71 dm_list_struct_base(snh,
72 struct lv_segment,
73 origin_list)->cow,
74 modules))
75 return_0;
76
77 if (lv_is_cow(seg->lv)) {
78 snap_seg = find_cow(seg->lv);
79 if (snap_seg->segtype->ops->modules_needed &&
80 !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
81 modules)) {
82 log_error("snap_seg module string allocation failed");
83 return 0;
84 }
85 }
86
87 for (s = 0; s < seg->area_count; s++) {
88 switch (seg_type(seg, s)) {
89 case AREA_LV:
90 seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
91 if (seg2 && !list_segment_modules(mem, seg2, modules))
92 return_0;
93 break;
94 case AREA_PV:
95 case AREA_UNASSIGNED:
96 ;
97 }
98 }
99
100 return 1;
101 }
102
103 int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
104 struct dm_list *modules)
105 {
106 struct lv_segment *seg;
107
108 dm_list_iterate_items(seg, &lv->segments)
109 if (!list_segment_modules(mem, seg, modules))
110 return_0;
111
112 return 1;
113 }
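/*
 * Editorial usage sketch (not part of the original source): collecting the
 * kernel module names an LV's segments depend on.  The element type is
 * assumed to be the str_list wrapper used elsewhere in this tree.
 *
 *	struct dm_list modules;
 *	struct str_list *sl;
 *
 *	dm_list_init(&modules);
 *	if (!list_lv_modules(cmd->mem, lv, &modules))
 *		return_0;
 *	dm_list_iterate_items(sl, &modules)
 *		log_verbose("LV %s needs module %s", lv->name, sl->str);
 */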
114
115 #ifndef DEVMAPPER_SUPPORT
116 void set_activation(int act)
117 {
118 static int warned = 0;
119
120 if (warned || !act)
121 return;
122
123 log_error("Compiled without libdevmapper support. "
124 "Can't enable activation.");
125
126 warned = 1;
127 }
128 int activation(void)
129 {
130 return 0;
131 }
132 int library_version(char *version, size_t size)
133 {
134 return 0;
135 }
136 int driver_version(char *version, size_t size)
137 {
138 return 0;
139 }
140 int target_version(const char *target_name, uint32_t *maj,
141 uint32_t *min, uint32_t *patchlevel)
142 {
143 return 0;
144 }
145 int target_present(struct cmd_context *cmd, const char *target_name,
146 int use_modprobe)
147 {
148 return 0;
149 }
150 int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, struct lvinfo *info,
151 int with_open_count, int with_read_ahead)
152 {
153 return 0;
154 }
155 int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
156 struct lvinfo *info, int with_open_count, int with_read_ahead)
157 {
158 return 0;
159 }
160 int lv_snapshot_percent(const struct logical_volume *lv, float *percent,
161 percent_range_t *percent_range)
162 {
163 return 0;
164 }
165 int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
166 int wait, float *percent, percent_range_t *percent_range,
167 uint32_t *event_nr)
168 {
169 return 0;
170 }
171 int lvs_in_vg_activated(struct volume_group *vg)
172 {
173 return 0;
174 }
175 int lvs_in_vg_opened(struct volume_group *vg)
176 {
177 return 0;
178 }
179 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
180 {
181 return 1;
182 }
183 int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s)
184 {
185 return 1;
186 }
187 int lv_resume(struct cmd_context *cmd, const char *lvid_s)
188 {
189 return 1;
190 }
191 int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s)
192 {
193 return 1;
194 }
195 int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
196 {
197 return 1;
198 }
199 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
200 int *activate_lv)
201 {
202 return 1;
203 }
204 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
205 {
206 return 1;
207 }
208 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
209 {
210 return 1;
211 }
212
213 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
214 {
215 return 1;
216 }
217
218 int pv_uses_vg(struct physical_volume *pv,
219 struct volume_group *vg)
220 {
221 return 0;
222 }
223
224 void activation_release(void)
225 {
226 return;
227 }
228
229 void activation_exit(void)
230 {
231 return;
232 }
233
234 #else /* DEVMAPPER_SUPPORT */
235
236 static int _activation = 1;
237
238 void set_activation(int act)
239 {
240 if (act == _activation)
241 return;
242
243 _activation = act;
244 if (_activation)
245 log_verbose("Activation enabled. Device-mapper kernel "
246 "driver will be used.");
247 else
248 log_warn("WARNING: Activation disabled. No device-mapper "
249 "interaction will be attempted.");
250 }
251
252 int activation(void)
253 {
254 return _activation;
255 }
256
257 static int _passes_activation_filter(struct cmd_context *cmd,
258 struct logical_volume *lv)
259 {
260 const struct config_node *cn;
261 struct config_value *cv;
262 char *str;
263 char path[PATH_MAX];
264
265 if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
266 log_verbose("activation/volume_list configuration setting "
267 "not defined, checking only host tags for %s/%s",
268 lv->vg->name, lv->name);
269
270 /* If no host tags defined, activate */
271 if (dm_list_empty(&cmd->tags))
272 return 1;
273
274 /* If any host tag matches any LV or VG tag, activate */
275 if (str_list_match_list(&cmd->tags, &lv->tags) ||
276 str_list_match_list(&cmd->tags, &lv->vg->tags))
277 return 1;
278
279 log_verbose("No host tag matches %s/%s",
280 lv->vg->name, lv->name);
281
282 /* Don't activate */
283 return 0;
284 }
285
286 for (cv = cn->v; cv; cv = cv->next) {
287 log_verbose("activation/volume_list configuration setting "
288 "defined, checking the list to match %s/%s",
289 lv->vg->name, lv->name);
290
291 if (cv->type != CFG_STRING) {
292 log_error("Ignoring invalid string in config file "
293 "activation/volume_list");
294 continue;
295 }
296 str = cv->v.str;
297 if (!*str) {
298 log_error("Ignoring empty string in config file "
299 "activation/volume_list");
300 continue;
301 }
302
303 /* Tag? */
304 if (*str == '@') {
305 str++;
306 if (!*str) {
307 log_error("Ignoring empty tag in config file "
308 "activation/volume_list");
309 continue;
310 }
311 /* If any host tag matches any LV or VG tag, activate */
312 if (!strcmp(str, "*")) {
313 if (str_list_match_list(&cmd->tags, &lv->tags)
314 || str_list_match_list(&cmd->tags,
315 &lv->vg->tags))
316 return 1;
317 else
318 continue;
319 }
320 /* If supplied tag matches LV or VG tag, activate */
321 if (str_list_match_item(&lv->tags, str) ||
322 str_list_match_item(&lv->vg->tags, str))
323 return 1;
324 else
325 continue;
326 }
327 if (!strchr(str, '/')) {
328 /* vgname supplied */
329 if (!strcmp(str, lv->vg->name))
330 return 1;
331 else
332 continue;
333 }
334 /* vgname/lvname */
335 if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
336 lv->name) < 0) {
337 log_error("dm_snprintf error from %s/%s", lv->vg->name,
338 lv->name);
339 continue;
340 }
341 if (!strcmp(path, str))
342 return 1;
343 }
344
345 log_verbose("No item supplied in activation/volume_list configuration "
346 "setting matches %s/%s", lv->vg->name, lv->name);
347
348 return 0;
349 }
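/*
 * Illustrative lvm.conf fragment (editorial sketch, not from this tree)
 * showing the four entry forms the filter above accepts: a tag, the special
 * "@*" (match any host tag against the LV/VG tags), a bare VG name, and a
 * "vgname/lvname" pair.
 *
 *	activation {
 *	    volume_list = [ "@backup", "@*", "vg00", "vg01/lvol0" ]
 *	}
 */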
350
351 int library_version(char *version, size_t size)
352 {
353 if (!activation())
354 return 0;
355
356 return dm_get_library_version(version, size);
357 }
358
359 int driver_version(char *version, size_t size)
360 {
361 if (!activation())
362 return 0;
363
364 log_very_verbose("Getting driver version");
365
366 return dm_driver_version(version, size);
367 }
368
369 int target_version(const char *target_name, uint32_t *maj,
370 uint32_t *min, uint32_t *patchlevel)
371 {
372 int r = 0;
373 struct dm_task *dmt;
374 struct dm_versions *target, *last_target;
375
376 log_very_verbose("Getting target version for %s", target_name);
377 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
378 return_0;
379
380 if (!dm_task_run(dmt)) {
381 log_debug("Failed to get %s target version", target_name);
382 /* Assume this was because LIST_VERSIONS isn't supported */
383 r = 1; goto out;
384 }
385
386 target = dm_task_get_versions(dmt);
387
388 do {
389 last_target = target;
390
391 if (!strcmp(target_name, target->name)) {
392 r = 1;
393 *maj = target->version[0];
394 *min = target->version[1];
395 *patchlevel = target->version[2];
396 goto out;
397 }
398
399 target = (void *) target + target->next;
400 } while (last_target != target);
401
402 out:
403 dm_task_destroy(dmt);
404
405 return r;
406 }
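/*
 * Editorial usage sketch: querying a target's version before relying on a
 * kernel feature.  The target name and version threshold are illustrative
 * only.
 *
 *	uint32_t maj, min, patchlevel;
 *
 *	if (!target_version("snapshot", &maj, &min, &patchlevel))
 *		return_0;
 *	if (maj > 1 || (maj == 1 && min >= 12))
 *		log_verbose("Snapshot target is new enough.");
 */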
407
408 int module_present(struct cmd_context *cmd, const char *target_name)
409 {
410 int ret = 0;
411 #ifdef MODPROBE_CMD
412 char module[128];
413 const char *argv[3];
414
415 if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
416 log_error("module_present module name too long: %s",
417 target_name);
418 return 0;
419 }
420
421 argv[0] = MODPROBE_CMD;
422 argv[1] = module;
423 argv[2] = NULL;
424
425 ret = exec_cmd(cmd, argv);
426 #endif
427 return ret;
428 }
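/*
 * Editorial note: for a target name of "mirror" the call above is roughly
 * equivalent to running
 *
 *	modprobe dm-mirror
 *
 * with MODPROBE_CMD substituted for the modprobe path chosen at configure
 * time.
 */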
429
430 int target_present(struct cmd_context *cmd, const char *target_name,
431 int use_modprobe)
432 {
433 uint32_t maj, min, patchlevel;
434
435 if (!activation())
436 return 0;
437
438 #ifdef MODPROBE_CMD
439 if (use_modprobe) {
440 if (target_version(target_name, &maj, &min, &patchlevel))
441 return 1;
442
443 if (!module_present(cmd, target_name))
444 return_0;
445 }
446 #endif
447
448 return target_version(target_name, &maj, &min, &patchlevel);
449 }
450
451 /*
452 * Returns 1 if info structure populated, else 0 on failure.
453 */
454 int lv_info(struct cmd_context *cmd, const struct logical_volume *lv,
455 struct lvinfo *info, int with_open_count, int with_read_ahead)
456 {
457 struct dm_info dminfo;
458
459 if (!activation())
460 return 0;
461
462 if (!dev_manager_info(lv->vg->cmd->mem, lv, with_open_count,
463 with_read_ahead, &dminfo, &info->read_ahead))
464 return_0;
465
466 info->exists = dminfo.exists;
467 info->suspended = dminfo.suspended;
468 info->open_count = dminfo.open_count;
469 info->major = dminfo.major;
470 info->minor = dminfo.minor;
471 info->read_only = dminfo.read_only;
472 info->live_table = dminfo.live_table;
473 info->inactive_table = dminfo.inactive_table;
474
475 return 1;
476 }
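/*
 * Editorial usage sketch: checking whether an LV is currently active and
 * whether anything holds it open.
 *
 *	struct lvinfo info;
 *
 *	if (!lv_info(cmd, lv, &info, 1, 0))
 *		return_0;
 *	if (info.exists && info.open_count)
 *		log_error("LV %s/%s is active and in use.",
 *			  lv->vg->name, lv->name);
 */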
477
478 int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
479 struct lvinfo *info, int with_open_count, int with_read_ahead)
480 {
481 int r;
482 struct logical_volume *lv;
483
484 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
485 return 0;
486
487 r = lv_info(cmd, lv, info, with_open_count, with_read_ahead);
488 vg_release(lv->vg);
489
490 return r;
491 }
492
493 /*
494  * Returns 1 if the LV's transient kernel status was checked, else 0 on failure.
495 */
496 int lv_check_transient(struct logical_volume *lv)
497 {
498 int r;
499 struct dev_manager *dm;
500
501 if (!activation())
502 return 0;
503
504 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
505 return_0;
506
507 if (!(r = dev_manager_transient(dm, lv)))
508 stack;
509
510 dev_manager_destroy(dm);
511
512 return r;
513 }
514
515 /*
516 * Returns 1 if percent set, else 0 on failure.
517 */
518 int lv_snapshot_percent(const struct logical_volume *lv, float *percent,
519 percent_range_t *percent_range)
520 {
521 int r;
522 struct dev_manager *dm;
523
524 if (!activation())
525 return 0;
526
527 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
528 return_0;
529
530 if (!(r = dev_manager_snapshot_percent(dm, lv, percent, percent_range)))
531 stack;
532
533 dev_manager_destroy(dm);
534
535 return r;
536 }
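/*
 * Editorial usage sketch: reporting how full a snapshot is.  cow_lv is a
 * hypothetical snapshot (COW) LV; the interpretation of percent_range is
 * defined by dev_manager and not repeated here.
 *
 *	float percent;
 *	percent_range_t range;
 *
 *	if (!lv_snapshot_percent(cow_lv, &percent, &range))
 *		return_0;
 *	log_print("Snapshot %s is %.2f%% full.", cow_lv->name, percent);
 */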
537
538 /* FIXME Merge with snapshot_percent */
539 int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
540 int wait, float *percent, percent_range_t *percent_range,
541 uint32_t *event_nr)
542 {
543 int r;
544 struct dev_manager *dm;
545 struct lvinfo info;
546
547 /* If a mirrored LV is temporarily shrunk to 1 area (= linear),
548 * it should be considered in-sync. */
549 if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
550 *percent = 100.0;
551 return 1;
552 }
553
554 if (!activation())
555 return 0;
556
557 if (!lv_info(cmd, lv, &info, 0, 0))
558 return_0;
559
560 if (!info.exists)
561 return 0;
562
563 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
564 return_0;
565
566 if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent,
567 percent_range, event_nr)))
568 stack;
569
570 dev_manager_destroy(dm);
571
572 return r;
573 }
574
575 static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
576 {
577 struct lvinfo info;
578
579 if (!lv_info(cmd, lv, &info, 0, 0)) {
580 stack;
581 return -1;
582 }
583
584 return info.exists;
585 }
586
587 static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
588 {
589 struct lvinfo info;
590
591 if (!lv_info(cmd, lv, &info, 1, 0)) {
592 stack;
593 return -1;
594 }
595
596 return info.open_count;
597 }
598
599 static int _lv_activate_lv(struct logical_volume *lv)
600 {
601 int r;
602 struct dev_manager *dm;
603
604 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
605 return_0;
606
607 if (!(r = dev_manager_activate(dm, lv)))
608 stack;
609
610 dev_manager_destroy(dm);
611 return r;
612 }
613
614 static int _lv_preload(struct logical_volume *lv, int *flush_required)
615 {
616 int r;
617 struct dev_manager *dm;
618
619 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
620 return_0;
621
622 if (!(r = dev_manager_preload(dm, lv, flush_required)))
623 stack;
624
625 dev_manager_destroy(dm);
626 return r;
627 }
628
629 static int _lv_deactivate(struct logical_volume *lv)
630 {
631 int r;
632 struct dev_manager *dm;
633
634 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
635 return_0;
636
637 if (!(r = dev_manager_deactivate(dm, lv)))
638 stack;
639
640 dev_manager_destroy(dm);
641 return r;
642 }
643
644 static int _lv_suspend_lv(struct logical_volume *lv, int lockfs, int flush_required)
645 {
646 int r;
647 struct dev_manager *dm;
648
649 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
650 return_0;
651
652 if (!(r = dev_manager_suspend(dm, lv, lockfs, flush_required)))
653 stack;
654
655 dev_manager_destroy(dm);
656 return r;
657 }
658
659 /*
660 * These two functions return the number of visible LVs in the VG that
661 * are currently active or open, respectively (0 if activation is disabled).
662 */
663 int lvs_in_vg_activated(struct volume_group *vg)
664 {
665 struct lv_list *lvl;
666 int count = 0;
667
668 if (!activation())
669 return 0;
670
671 dm_list_iterate_items(lvl, &vg->lvs) {
672 if (lv_is_visible(lvl->lv))
673 count += (_lv_active(vg->cmd, lvl->lv) == 1);
674 }
675
676 return count;
677 }
678
679 int lvs_in_vg_opened(const struct volume_group *vg)
680 {
681 const struct lv_list *lvl;
682 int count = 0;
683
684 if (!activation())
685 return 0;
686
687 dm_list_iterate_items(lvl, &vg->lvs) {
688 if (lv_is_visible(lvl->lv))
689 count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
690 }
691
692 return count;
693 }
694
695 /*
696 * Determine whether an LV is active locally or in a cluster.
697 * Assumes vg lock held.
698 * Returns:
699 * 0 - not active locally or on any node in the cluster
700 * 1 - active either locally or on some node in the cluster
701 */
702 int lv_is_active(struct logical_volume *lv)
703 {
704 int ret;
705
706 if (_lv_active(lv->vg->cmd, lv))
707 return 1;
708
709 if (!vg_is_clustered(lv->vg))
710 return 0;
711
712 if ((ret = remote_lock_held(lv->lvid.s)) >= 0)
713 return ret;
714
715 /*
716 * Old compatibility code if locking doesn't support lock query
717 * FIXME: check status to avoid deactivating an already active device
718 */
719 if (activate_lv_excl(lv->vg->cmd, lv)) {
720 if (!deactivate_lv(lv->vg->cmd, lv))
721 stack;
722 return 0;
723 }
724
725 /*
726 * Exclusive local activation failed so assume it is active elsewhere.
727 */
728 return 1;
729 }
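/*
 * Editorial usage sketch: callers normally only need the boolean answer,
 * for example to refuse an operation on an active LV.
 *
 *	if (lv_is_active(lv)) {
 *		log_error("LV %s/%s must not be active.",
 *			  lv->vg->name, lv->name);
 *		return 0;
 *	}
 */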
730
731 #ifdef DMEVENTD
732 static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
733 const int timeout, enum dm_event_mask mask)
734 {
735 struct dm_event_handler *dmevh;
736
737 if (!(dmevh = dm_event_handler_create()))
738 return_NULL;
739
740 if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
741 goto_bad;
742
743 if (dm_event_handler_set_dso(dmevh, dso))
744 goto_bad;
745
746 if (dm_event_handler_set_uuid(dmevh, dmuuid))
747 goto_bad;
748
749 dm_event_handler_set_timeout(dmevh, timeout);
750 dm_event_handler_set_event_mask(dmevh, mask);
751
752 return dmevh;
753
754 bad:
755 dm_event_handler_destroy(dmevh);
756 return NULL;
757 }
758
759 char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
760 {
761 char *path;
762
763 if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
764 log_error("Failed to allocate dmeventd library path.");
765 return NULL;
766 }
767
768 get_shared_library_path(cmd, libpath, path, PATH_MAX);
769
770 return path;
771 }
772
773 int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
774 struct logical_volume *lv, int *pending)
775 {
776 char *uuid;
777 enum dm_event_mask evmask = 0;
778 struct dm_event_handler *dmevh;
779
780 *pending = 0;
781
782 if (!dso)
783 return_0;
784
785 /* We always monitor the "real" device, never the "snapshot-origin" itself. */
786 if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
787 return_0;
788
789 if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
790 return_0;
791
792 if (dm_event_get_registered_device(dmevh, 0)) {
793 dm_event_handler_destroy(dmevh);
794 return 0;
795 }
796
797 evmask = dm_event_handler_get_event_mask(dmevh);
798 if (evmask & DM_EVENT_REGISTRATION_PENDING) {
799 *pending = 1;
800 evmask &= ~DM_EVENT_REGISTRATION_PENDING;
801 }
802
803 dm_event_handler_destroy(dmevh);
804
805 return evmask;
806 }
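/*
 * Editorial usage sketch: the return value is the registered event mask
 * (0 when nothing is registered) and *pending is set while a registration
 * or unregistration is still in flight.
 *
 *	int pending = 0, registered;
 *
 *	registered = target_registered_with_dmeventd(cmd, dso, lv, &pending);
 *	if (pending)
 *		log_verbose("dmeventd registration for %s still pending.",
 *			    lv->name);
 *	else if (registered)
 *		log_verbose("%s is monitored by dmeventd.", lv->name);
 */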
807
808 int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
809 int evmask __attribute__((unused)), int set, int timeout)
810 {
811 char *uuid;
812 struct dm_event_handler *dmevh;
813 int r;
814
815 if (!dso)
816 return_0;
817
818 /* We always monitor the "real" device, never the "snapshot-origin" itself. */
819 if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
820 return_0;
821
822 if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
823 DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
824 return_0;
825
826 r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);
827
828 dm_event_handler_destroy(dmevh);
829
830 if (!r)
831 return_0;
832
833 log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);
834
835 return 1;
836 }
837
838 #endif
839
840 /*
841 * Returns 0 if an attempt to (un)monitor the device failed.
842 * Returns 1 otherwise.
843 */
844 int monitor_dev_for_events(struct cmd_context *cmd,
845 struct logical_volume *lv, int monitor)
846 {
847 #ifdef DMEVENTD
848 int i, pending = 0, monitored;
849 int r = 1;
850 struct dm_list *tmp, *snh, *snht;
851 struct lv_segment *seg;
852 struct lv_segment *log_seg;
853 int (*monitor_fn) (struct lv_segment *s, int e);
854 uint32_t s;
855
856 /* skip dmeventd code altogether */
857 if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
858 return 1;
859
860 /*
861 * Nothing to do if dmeventd configured not to be used.
862 */
863 if (monitor && !dmeventd_monitor_mode())
864 return 1;
865
866 /*
867 * In case of a snapshot device, we monitor lv->snapshot->lv,
868 * not the actual LV itself.
869 */
870 if (lv_is_cow(lv) && !lv_is_merging_cow(lv))
871 return monitor_dev_for_events(cmd, lv->snapshot->lv, monitor);
872
873 /*
874 * In case this LV is a snapshot origin, we instead monitor
875 * each of its respective snapshots. The origin itself may
876 * also need to be monitored if it is a mirror, for example.
877 */
878 if (lv_is_origin(lv))
879 dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
880 if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
881 struct lv_segment, origin_list)->cow, monitor))
882 r = 0;
883
884 /*
885 * If the volume is mirrored and its log is also mirrored, monitor
886 * the log volume as well.
887 */
888 if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
889 (log_seg = first_seg(seg->log_lv)) != NULL &&
890 seg_is_mirrored(log_seg))
891 if (!monitor_dev_for_events(cmd, seg->log_lv, monitor))
892 r = 0;
893
894 dm_list_iterate(tmp, &lv->segments) {
895 seg = dm_list_item(tmp, struct lv_segment);
896
897 /* Recurse for AREA_LV */
898 for (s = 0; s < seg->area_count; s++) {
899 if (seg_type(seg, s) != AREA_LV)
900 continue;
901 if (!monitor_dev_for_events(cmd, seg_lv(seg, s),
902 monitor)) {
903 log_error("Failed to %smonitor %s",
904 monitor ? "" : "un",
905 seg_lv(seg, s)->name);
906 r = 0;
907 }
908 }
909
910 if (!seg_monitored(seg) || (seg->status & PVMOVE))
911 continue;
912
913 monitor_fn = NULL;
914
915 /* Check monitoring status */
916 if (seg->segtype->ops->target_monitored)
917 monitored = seg->segtype->ops->target_monitored(seg, &pending);
918 else
919 continue; /* segtype doesn't support registration */
920
921 /*
922 * FIXME: We should really try again if pending
923 */
924 monitored = (pending) ? 0 : monitored;
925
926 if (monitor) {
927 if (monitored)
928 log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
929 else if (seg->segtype->ops->target_monitor_events)
930 monitor_fn = seg->segtype->ops->target_monitor_events;
931 } else {
932 if (!monitored)
933 log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
934 else if (seg->segtype->ops->target_unmonitor_events)
935 monitor_fn = seg->segtype->ops->target_unmonitor_events;
936 }
937
938 /* Do [un]monitor */
939 if (!monitor_fn)
940 continue;
941
942 log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
943 test_mode() ? " [Test mode: skipping this]" : "");
944
945 /* FIXME Test mode should really continue a bit further. */
946 if (test_mode())
947 continue;
948
949 /* FIXME specify events */
950 if (!monitor_fn(seg, 0)) {
951 log_error("%s/%s: %s segment monitoring function failed.",
952 lv->vg->name, lv->name, seg->segtype->name);
953 return 0;
954 }
955
956 /* Check [un]monitor results */
957 /* Try a couple times if pending, but not forever... */
958 for (i = 0; i < 10; i++) {
959 pending = 0;
960 monitored = seg->segtype->ops->target_monitored(seg, &pending);
961 if (pending ||
962 (!monitored && monitor) ||
963 (monitored && !monitor))
964 log_very_verbose("%s/%s %smonitoring still pending: waiting...",
965 lv->vg->name, lv->name, monitor ? "" : "un");
966 else
967 break;
968 sleep(1);
969 }
970
971 if (r)
972 r = (monitored && monitor) || (!monitored && !monitor);
973 }
974
975 return r;
976 #else
977 return 1;
978 #endif
979 }
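/*
 * Editorial usage sketch: (un)monitoring is normally driven from the
 * suspend/resume/activate paths below, but it can also be requested
 * directly; failures are usually treated as non-fatal.
 *
 *	if (!monitor_dev_for_events(cmd, lv, 1))
 *		stack;
 */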
980
981 static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
982 int error_if_not_suspended)
983 {
984 struct logical_volume *lv = NULL, *lv_pre = NULL;
985 struct lvinfo info;
986 int r = 0, lockfs = 0, flush_required = 0;
987
988 if (!activation())
989 return 1;
990
991 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
992 goto_out;
993
994 /* Use precommitted metadata if present */
995 if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
996 goto_out;
997
998 if (test_mode()) {
999 _skip("Suspending '%s'.", lv->name);
1000 r = 1;
1001 goto out;
1002 }
1003
1004 if (!lv_info(cmd, lv, &info, 0, 0))
1005 goto_out;
1006
1007 if (!info.exists || info.suspended) {
1008 if (!error_if_not_suspended) {
1009 r = 1;
1010 if (info.suspended)
1011 memlock_inc(cmd);
1012 }
1013 goto out;
1014 }
1015
1016 if (!lv_read_replicator_vgs(lv))
1017 goto_out;
1018
1019 lv_calculate_readahead(lv, NULL);
1020
1021 /* If VG was precommitted, preload devices for the LV */
1022 if ((lv_pre->vg->status & PRECOMMITTED)) {
1023 if (!_lv_preload(lv_pre, &flush_required)) {
1024 /* FIXME Revert preloading */
1025 goto_out;
1026 }
1027 }
1028
1029 if (!monitor_dev_for_events(cmd, lv, 0))
1030 /* FIXME Consider aborting here */
1031 stack;
1032
1033 memlock_inc(cmd);
1034
1035 if (lv_is_origin(lv_pre) || lv_is_cow(lv_pre))
1036 lockfs = 1;
1037
1038 if (!_lv_suspend_lv(lv, lockfs, flush_required)) {
1039 memlock_dec(cmd);
1040 fs_unlock();
1041 goto out;
1042 }
1043
1044 r = 1;
1045 out:
1046 if (lv_pre)
1047 vg_release(lv_pre->vg);
1048 if (lv) {
1049 lv_release_replicator_vgs(lv);
1050 vg_release(lv->vg);
1051 }
1052
1053 return r;
1054 }
1055
1056 /* Returns success if the device is not active */
1057 int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s)
1058 {
1059 return _lv_suspend(cmd, lvid_s, 0);
1060 }
1061
1062 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
1063 {
1064 return _lv_suspend(cmd, lvid_s, 1);
1065 }
1066
1067 static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
1068 int error_if_not_active)
1069 {
1070 struct logical_volume *lv;
1071 struct lvinfo info;
1072 int r = 0;
1073
1074 if (!activation())
1075 return 1;
1076
1077 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1078 goto_out;
1079
1080 if (test_mode()) {
1081 _skip("Resuming '%s'.", lv->name);
1082 r = 1;
1083 goto out;
1084 }
1085
1086 if (!lv_info(cmd, lv, &info, 0, 0))
1087 goto_out;
1088
1089 if (!info.exists || !info.suspended) {
1090 if (error_if_not_active)
1091 goto_out;
1092 r = 1;
1093 goto out;
1094 }
1095
1096 if (!_lv_activate_lv(lv))
1097 goto_out;
1098
1099 memlock_dec(cmd);
1100 fs_unlock();
1101
1102 if (!monitor_dev_for_events(cmd, lv, 1))
1103 stack;
1104
1105 r = 1;
1106 out:
1107 if (lv)
1108 vg_release(lv->vg);
1109
1110 return r;
1111 }
1112
1113 /* Returns success if the device is not active */
1114 int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s)
1115 {
1116 return _lv_resume(cmd, lvid_s, 0);
1117 }
1118
1119 int lv_resume(struct cmd_context *cmd, const char *lvid_s)
1120 {
1121 return _lv_resume(cmd, lvid_s, 1);
1122 }
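/*
 * Editorial sketch of the usual calling pattern around a metadata update:
 * suspend (which preloads the precommitted table), commit the metadata,
 * then resume.  Error handling is simplified; vg_commit() is assumed from
 * the metadata API.
 *
 *	if (!lv_suspend_if_active(cmd, lv->lvid.s))
 *		return_0;
 *	if (!vg_commit(vg))
 *		stack;
 *	if (!lv_resume_if_active(cmd, lv->lvid.s))
 *		return_0;
 */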
1123
1124 static int _lv_has_open_snapshots(struct logical_volume *lv)
1125 {
1126 struct lv_segment *snap_seg;
1127 struct lvinfo info;
1128 int r = 0;
1129
1130 dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
1131 if (!lv_info(lv->vg->cmd, snap_seg->cow, &info, 1, 0)) {
1132 r = 1;
1133 continue;
1134 }
1135
1136 if (info.exists && info.open_count) {
1137 log_error("LV %s/%s has open snapshot %s: "
1138 "not deactivating", lv->vg->name, lv->name,
1139 snap_seg->cow->name);
1140 r = 1;
1141 }
1142 }
1143
1144 return r;
1145 }
1146
1147 int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
1148 {
1149 struct logical_volume *lv;
1150 struct lvinfo info;
1151 int r = 0;
1152
1153 if (!activation())
1154 return 1;
1155
1156 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1157 goto out;
1158
1159 if (test_mode()) {
1160 _skip("Deactivating '%s'.", lv->name);
1161 r = 1;
1162 goto out;
1163 }
1164
1165 if (!lv_info(cmd, lv, &info, 1, 0))
1166 goto_out;
1167
1168 if (!info.exists) {
1169 r = 1;
1170 goto out;
1171 }
1172
1173 if (lv_is_visible(lv)) {
1174 if (info.open_count) {
1175 log_error("LV %s/%s in use: not deactivating",
1176 lv->vg->name, lv->name);
1177 goto out;
1178 }
1179 if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
1180 goto_out;
1181 }
1182
1183 if (!lv_read_replicator_vgs(lv))
1184 goto_out;
1185
1186 lv_calculate_readahead(lv, NULL);
1187
1188 if (!monitor_dev_for_events(cmd, lv, 0))
1189 stack;
1190
1191 memlock_inc(cmd);
1192 r = _lv_deactivate(lv);
1193 memlock_dec(cmd);
1194 fs_unlock();
1195
1196 if (!lv_info(cmd, lv, &info, 1, 0) || info.exists)
1197 r = 0;
1198 out:
1199 if (lv) {
1200 lv_release_replicator_vgs(lv);
1201 vg_release(lv->vg);
1202 }
1203
1204 return r;
1205 }
1206
1207 /* Test if LV passes filter */
1208 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
1209 int *activate_lv)
1210 {
1211 struct logical_volume *lv;
1212 int r = 0;
1213
1214 if (!activation()) {
1215 *activate_lv = 1;
1216 return 1;
1217 }
1218
1219 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1220 goto out;
1221
1222 if (!_passes_activation_filter(cmd, lv)) {
1223 log_verbose("Not activating %s/%s due to config file settings",
1224 lv->vg->name, lv->name);
1225 *activate_lv = 0;
1226 } else
1227 *activate_lv = 1;
1228 r = 1;
1229 out:
1230 if (lv)
1231 vg_release(lv->vg);
1232
1233 return r;
1234 }
1235
1236 static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
1237 int exclusive, int filter)
1238 {
1239 struct logical_volume *lv;
1240 struct lvinfo info;
1241 int r = 0;
1242
1243 if (!activation())
1244 return 1;
1245
1246 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1247 goto out;
1248
1249 if (filter && !_passes_activation_filter(cmd, lv)) {
1250 log_verbose("Not activating %s/%s due to config file settings",
1251 lv->vg->name, lv->name);
1252 goto out;
1253 }
1254
1255 if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
1256 log_error("Refusing activation of partial LV %s. Use --partial to override.",
1257 lv->name);
1258 goto_out;
1259 }
1260
1261 if (lv_has_unknown_segments(lv)) {
1262 log_error("Refusing activation of LV %s containing "
1263 "an unrecognised segment.", lv->name);
1264 goto_out;
1265 }
1266
1267 if (test_mode()) {
1268 _skip("Activating '%s'.", lv->name);
1269 r = 1;
1270 goto out;
1271 }
1272
1273 if (!lv_info(cmd, lv, &info, 0, 0))
1274 goto_out;
1275
1276 if (info.exists && !info.suspended && info.live_table) {
1277 r = 1;
1278 goto out;
1279 }
1280
1281 if (!lv_read_replicator_vgs(lv))
1282 goto_out;
1283
1284 lv_calculate_readahead(lv, NULL);
1285
1286 if (exclusive)
1287 lv->status |= ACTIVATE_EXCL;
1288
1289 memlock_inc(cmd);
1290 if (!(r = _lv_activate_lv(lv)))
1291 stack;
1292 memlock_dec(cmd);
1293 fs_unlock();
1294
1295 if (r && !monitor_dev_for_events(cmd, lv, 1))
1296 stack;
1297
1298 out:
1299 if (lv) {
1300 lv_release_replicator_vgs(lv);
1301 vg_release(lv->vg);
1302 }
1303
1304 return r;
1305 }
1306
1307 /* Activate LV */
1308 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1309 {
1310 if (!_lv_activate(cmd, lvid_s, exclusive, 0))
1311 return_0;
1312
1313 return 1;
1314 }
1315
1316 /* Activate LV only if it passes filter */
1317 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1318 {
1319 if (!_lv_activate(cmd, lvid_s, exclusive, 1))
1320 return_0;
1321
1322 return 1;
1323 }
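/*
 * Editorial usage sketch: non-exclusive activation by lvid, honouring the
 * activation/volume_list filter checked above.
 *
 *	if (!lv_activate_with_filter(cmd, lv->lvid.s, 0))
 *		return_0;
 */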
1324
1325 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
1326 {
1327 int r = 1;
1328
1329 if (!lv) {
1330 r = dm_mknodes(NULL);
1331 fs_unlock();
1332 return r;
1333 }
1334
1335 if (!activation())
1336 return 1;
1337
1338 r = dev_manager_mknodes(lv);
1339
1340 fs_unlock();
1341
1342 return r;
1343 }
1344
1345 /*
1346 * Does PV use VG somewhere in its construction?
1347 * Returns 1 on failure.
1348 */
1349 int pv_uses_vg(struct physical_volume *pv,
1350 struct volume_group *vg)
1351 {
1352 if (!activation())
1353 return 0;
1354
1355 if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
1356 return 0;
1357
1358 return dev_manager_device_uses_vg(pv->dev, vg);
1359 }
1360
1361 void activation_release(void)
1362 {
1363 dev_manager_release();
1364 }
1365
1366 void activation_exit(void)
1367 {
1368 dev_manager_exit();
1369 }
1370 #endif