/* LVM2 - lib/activate/activate.c */
1 /*
2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
4 *
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
10 *
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 */
15
16 #include "lib.h"
17 #include "metadata.h"
18 #include "activate.h"
19 #include "memlock.h"
20 #include "display.h"
21 #include "fs.h"
22 #include "lvm-exec.h"
23 #include "lvm-file.h"
24 #include "lvm-string.h"
25 #include "toolcontext.h"
26 #include "dev_manager.h"
27 #include "str_list.h"
28 #include "config.h"
29 #include "filter.h"
30 #include "segtype.h"
31 #include "sharedlib.h"
32
33 #include <limits.h>
34 #include <fcntl.h>
35 #include <unistd.h>
36
/* Test-mode helper: log the action that would have been taken, without doing it. */
#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
38
39 int lvm1_present(struct cmd_context *cmd)
40 {
41 char path[PATH_MAX];
42
43 if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
44 < 0) {
45 log_error("LVM1 proc global snprintf failed");
46 return 0;
47 }
48
49 if (path_exists(path))
50 return 1;
51 else
52 return 0;
53 }
54
/*
 * Append the kernel module names needed to activate this segment (and
 * any LVs it depends on: snapshots of its origin, its COW counterpart,
 * and stacked AREA_LV sub-LVs) to 'modules'.
 * Returns 1 on success, 0 on allocation failure.
 */
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	/* Segment-type-specific module requirements (optional op) */
	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	/* An origin also needs the modules of every snapshot COW LV */
	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	/* A COW LV also needs the modules of its snapshot segment */
	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	/* Recurse into any stacked sub-LVs backing this segment's areas */
	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;	/* physical / unassigned areas need no modules */
		}
	}

	return 1;
}
102
/*
 * Collect the kernel modules required by every segment of 'lv' into
 * 'modules'.  Returns 1 on success, 0 on failure.
 */
int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules)
{
	struct lv_segment *seg;

	dm_list_iterate_items(seg, &lv->segments)
		if (!list_segment_modules(mem, seg, modules))
			return_0;

	return 1;
}
114
#ifndef DEVMAPPER_SUPPORT
/*
 * Stub implementations used when LVM2 is built without libdevmapper.
 * Query functions report "nothing active" / failure (0); activation
 * entry points report success (1) so command flow continues without
 * any device-mapper interaction.
 */

/* Warn once if anyone attempts to enable activation in this build. */
void set_activation(int act)
{
	static int warned = 0;

	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
/* Activation is permanently disabled in this build. */
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
/* The remaining stubs report success so callers proceed untroubled. */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}

int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}

int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}

/* Nothing to release/tear down without device-mapper support. */
void activation_release(void)
{
	return;
}

void activation_exit(void)
{
	return;
}
234
235 #else /* DEVMAPPER_SUPPORT */
236
/* Whether device-mapper interaction is enabled (on by default). */
static int _activation = 1;

/*
 * Enable or disable activation (device-mapper interaction).
 * Logs only when the setting actually changes.
 */
void set_activation(int act)
{
	if (_activation == act)
		return;

	_activation = act;

	if (!_activation)
		log_warn("WARNING: Activation disabled. No device-mapper "
			 "interaction will be attempted.");
	else
		log_verbose("Activation enabled. Device-mapper kernel "
			    "driver will be used.");
}
252
/* Returns non-zero if activation (device-mapper use) is enabled. */
int activation(void)
{
	return _activation;
}
257
/*
 * Decide whether an LV may be activated on this host, based on the
 * activation/volume_list config setting; when that is unset, host tags
 * alone decide.  Returns 1 to allow activation, 0 to refuse.
 */
static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct config_node *cn;
	const struct config_value *cv;
	const char *str;
	char path[PATH_MAX];

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined, checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}
	else
		log_verbose("activation/volume_list configuration setting "
			    "defined, checking the list to match %s/%s",
			    lv->vg->name, lv->name);

	/* Entries may be: "@tag", "@*", "vgname", or "vgname/lvname" */
	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != CFG_STRING) {
			log_error("Ignoring invalid string in config file "
				  "activation/volume_list");
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file "
				  "activation/volume_list");
			continue;
		}


		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "activation/volume_list");
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in activation/volume_list configuration "
		    "setting matches %s/%s", lv->vg->name, lv->name);

	return 0;
}
352
/*
 * Fetch the libdevmapper library version string into 'version'.
 * Returns 0 if activation is disabled or on failure.
 */
int library_version(char *version, size_t size)
{
	return activation() ? dm_get_library_version(version, size) : 0;
}
360
/*
 * Fetch the device-mapper kernel driver version string into 'version'.
 * Returns 0 if activation is disabled or on failure.
 */
int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}
370
371 int target_version(const char *target_name, uint32_t *maj,
372 uint32_t *min, uint32_t *patchlevel)
373 {
374 int r = 0;
375 struct dm_task *dmt;
376 struct dm_versions *target, *last_target;
377
378 log_very_verbose("Getting target version for %s", target_name);
379 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
380 return_0;
381
382 if (!dm_task_run(dmt)) {
383 log_debug("Failed to get %s target version", target_name);
384 /* Assume this was because LIST_VERSIONS isn't supported */
385 return 1;
386 }
387
388 target = dm_task_get_versions(dmt);
389
390 do {
391 last_target = target;
392
393 if (!strcmp(target_name, target->name)) {
394 r = 1;
395 *maj = target->version[0];
396 *min = target->version[1];
397 *patchlevel = target->version[2];
398 goto out;
399 }
400
401 target = (struct dm_versions *)((char *) target + target->next);
402 } while (last_target != target);
403
404 out:
405 dm_task_destroy(dmt);
406
407 return r;
408 }
409
/*
 * Attempt to load kernel module "dm-<target_name>" via modprobe.
 * Returns the exec_cmd result, or 0 when MODPROBE_CMD is not
 * configured or the module name is too long.
 */
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
431
/*
 * Check whether a device-mapper target is available in the kernel,
 * optionally trying to modprobe it first.  Returns non-zero when the
 * target version query succeeds, 0 otherwise.
 */
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		/* Already loaded? */
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	/* Re-check after (possibly) loading the module */
	return target_version(target_name, &maj, &min, &patchlevel);
}
452
/*
 * Query the kernel state of an LV via dev_manager and fill in 'info'.
 * With origin_only set, the "-real" origin device is queried instead.
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;

	if (!activation())
		return 0;
	/*
	 * If open_count info is requested and we have to be sure our own udev
	 * transactions are finished
	 * For non-clustered locking type we are only interested for non-delete operation
	 * in progress - as only those could lead to opened files
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	if (!dev_manager_info(lv->vg->cmd->mem, lv, origin_only ? "real" : NULL, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	/* Copy the device-mapper status into the caller's lvinfo */
	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}
491
/*
 * lv_info() variant taking an LVID string: reads the VG metadata,
 * queries the LV, then releases the VG.  Returns 1 if info populated,
 * else 0.
 */
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	int r;
	struct logical_volume *lv;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		return 0;

	/* origin_only is meaningless unless the LV really is an origin */
	if (!lv_is_origin(lv))
		origin_only = 0;

	r = lv_info(cmd, lv, origin_only, info, with_open_count, with_read_ahead);
	free_vg(lv->vg);

	return r;
}
510
511 /*
512 * Returns 1 if percent set, else 0 on failure.
513 */
514 int lv_check_transient(struct logical_volume *lv)
515 {
516 int r;
517 struct dev_manager *dm;
518
519 if (!activation())
520 return 0;
521
522 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
523 return_0;
524
525 if (!(r = dev_manager_transient(dm, lv)))
526 stack;
527
528 dev_manager_destroy(dm);
529
530 return r;
531 }
532
533 /*
534 * Returns 1 if percent set, else 0 on failure.
535 */
536 int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
537 {
538 int r;
539 struct dev_manager *dm;
540
541 if (!activation())
542 return 0;
543
544 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
545 return_0;
546
547 if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
548 stack;
549
550 dev_manager_destroy(dm);
551
552 return r;
553 }
554
/* FIXME Merge with snapshot_percent */
/*
 * Query mirror sync progress via dev_manager.
 * Returns 1 if *percent set, else 0 on failure.
 */
int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If mirrored LV is temporarily shrinked to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	/* Inactive LV: nothing to report (no stack trace - not an error) */
	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
589
590 static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
591 {
592 struct lvinfo info;
593
594 if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
595 stack;
596 return -1;
597 }
598
599 return info.exists;
600 }
601
602 static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
603 {
604 struct lvinfo info;
605
606 if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
607 stack;
608 return -1;
609 }
610
611 return info.open_count;
612 }
613
614 static int _lv_activate_lv(struct logical_volume *lv, unsigned origin_only)
615 {
616 int r;
617 struct dev_manager *dm;
618
619 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
620 return_0;
621
622 if (!(r = dev_manager_activate(dm, lv, origin_only)))
623 stack;
624
625 dev_manager_destroy(dm);
626 return r;
627 }
628
629 static int _lv_preload(struct logical_volume *lv, unsigned origin_only, int *flush_required)
630 {
631 int r;
632 struct dev_manager *dm;
633
634 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
635 return_0;
636
637 if (!(r = dev_manager_preload(dm, lv, origin_only, flush_required)))
638 stack;
639
640 dev_manager_destroy(dm);
641 return r;
642 }
643
644 static int _lv_deactivate(struct logical_volume *lv)
645 {
646 int r;
647 struct dev_manager *dm;
648
649 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
650 return_0;
651
652 if (!(r = dev_manager_deactivate(dm, lv)))
653 stack;
654
655 dev_manager_destroy(dm);
656 return r;
657 }
658
659 static int _lv_suspend_lv(struct logical_volume *lv, unsigned origin_only, int lockfs, int flush_required)
660 {
661 int r;
662 struct dev_manager *dm;
663
664 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
665 return_0;
666
667 if (!(r = dev_manager_suspend(dm, lv, origin_only, lockfs, flush_required)))
668 stack;
669
670 dev_manager_destroy(dm);
671 return r;
672 }
673
/*
 * These two functions return the number of visible LVs that are
 * active (resp. open).  LVs whose state cannot be queried are not
 * counted.
 */
678 int lvs_in_vg_activated(struct volume_group *vg)
679 {
680 struct lv_list *lvl;
681 int count = 0;
682
683 if (!activation())
684 return 0;
685
686 dm_list_iterate_items(lvl, &vg->lvs) {
687 if (lv_is_visible(lvl->lv))
688 count += (_lv_active(vg->cmd, lvl->lv) == 1);
689 }
690
691 return count;
692 }
693
694 int lvs_in_vg_opened(const struct volume_group *vg)
695 {
696 const struct lv_list *lvl;
697 int count = 0;
698
699 if (!activation())
700 return 0;
701
702 dm_list_iterate_items(lvl, &vg->lvs) {
703 if (lv_is_visible(lvl->lv))
704 count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
705 }
706
707 return count;
708 }
709
/*
 * _lv_is_active
 * @lv: logical volume being queried
 * @locally: set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	    0		not active
 *	   1	   0	    0		active remotely
 *	   1	   0	    1		exclusive remotely
 *	   1	   1	    0		active locally and possibly remotely
 *	   1	   1	    1		exclusive locally (or local && !cluster)
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		e = 1;  /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	/* Query the cluster lock manager; >= 0 means the query succeeded */
	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	if (l) {
		if (exclusive)
			log_error("Unable to determine exclusivity of %s",
				  lv->name);
		goto out;
	}

	/* NOTE(review): fallback probe - if exclusive activation succeeds,
	 * the LV was presumably inactive; it is deactivated again and the
	 * function reports "not active".  Confirm against cluster locking
	 * behaviour. */
	if (activate_lv_excl(lv->vg->cmd, lv)) {
		if (!deactivate_lv(lv->vg->cmd, lv))
			stack;
		return 0;
	}

out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}
789
/* Returns 1 if the LV is active anywhere (locally or remotely). */
int lv_is_active(struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}
794
795 /*
796 int lv_is_active_locally(struct logical_volume *lv)
797 {
798 int l;
799 return _lv_is_active(lv, &l, NULL) && l;
800 }
801 */
802
/* Returns 1 only if the LV is active exclusively on this node. */
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;
	return _lv_is_active(lv, &l, &e) && l && e;
}
808
/* Returns 1 only if the LV is active exclusively on some other node. */
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;
	return _lv_is_active(lv, &l, &e) && !l && e;
}
814
815 #ifdef DMEVENTD
/*
 * Build a dm_event_handler for the given device UUID and monitoring
 * DSO.  Returns NULL on failure; on success the caller must destroy
 * the handler with dm_event_handler_destroy().
 */
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							 const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	/* The setters return non-zero on failure */
	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);
	return NULL;
}
842
843 char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
844 {
845 char *path;
846
847 if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
848 log_error("Failed to allocate dmeventd library path.");
849 return NULL;
850 }
851
852 get_shared_library_path(cmd, libpath, path, PATH_MAX);
853
854 return path;
855 }
856
/*
 * Ask dmeventd whether the given DSO is monitoring this LV.
 * Returns the registered event mask (0 if not registered or on error);
 * *pending is set if an (un)registration is still in progress.
 */
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	*pending = 0;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	/* Report but strip the "registration pending" marker from the mask */
	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}
891
/*
 * Register (set != 0) or unregister (set == 0) dmeventd monitoring of
 * this LV through the given DSO.  A non-zero timeout additionally
 * requests periodic timeout events.  Returns 1 on success, 0 on failure.
 */
int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}
921
922 #endif
923
/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 *
 * Walks the LV (snapshots, mirror log, stacked sub-LVs, then each
 * segment) and asks each segment type to register or unregister
 * dmeventd monitoring as requested.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   unsigned origin_only, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;

	/* skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && !lv_is_merging_cow(lv))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, 0, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots. The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, 0, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, 0, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), 0,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		/* pvmove LVs and unmonitorable segments are skipped */
		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue;  /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		/* Pick the registration or unregistration hook as needed */
		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		/* Only record success if every earlier step also succeeded */
		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}
1064
/*
 * Suspend an LV ahead of a metadata commit: preload the precommitted
 * tables, stop dmeventd monitoring, then suspend the device(s).
 * With origin_only set, only the "-real" origin device is touched.
 * When error_if_not_suspended is 0, a missing or already-suspended
 * device counts as success.  Returns 1 on success, 0 on failure.
 */
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       unsigned origin_only, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_origin(lv) || !lv_is_origin(lv_pre))
		origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			/* Keep the critical-section balance for the
			 * matching resume even though we did nothing */
			if (info.suspended)
				critical_section_inc(cmd);
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/* If VG was precommitted, preload devices for the LV */
	if ((lv_pre->vg->status & PRECOMMITTED)) {
		if (!_lv_preload(lv_pre, origin_only, &flush_required)) {
			/* FIXME Revert preloading */
			goto_out;
		}
	}

	if (!monitor_dev_for_events(cmd, lv, origin_only, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd);

	/* Snapshots need the filesystem frozen for a consistent image */
	if (!origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	if (!_lv_suspend_lv(lv, origin_only, lockfs, flush_required)) {
		critical_section_dec(cmd);
		fs_unlock();
		goto out;
	}

	r = 1;
out:
	if (lv_pre)
		free_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
1144
/* Returns success if the device is not active */
/* Suspend the LV identified by lvid_s; no-op success if inactive. */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return _lv_suspend(cmd, lvid_s, origin_only, 0);
}
1150
1151 /* No longer used */
1152 /***********
1153 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
1154 {
1155 return _lv_suspend(cmd, lvid_s, 1);
1156 }
1157 ***********/
1158
/*
 * _lv_resume
 * @cmd
 * @lvid_s
 * @origin_only
 * @exclusive: This parameter only has an affect in cluster-context.
 *             It forces local target type to be used (instead of
 *             cluster-aware type).
 * @error_if_not_active
 *
 * Resume a previously suspended LV and restart dmeventd monitoring.
 * Returns 1 on success, 0 on failure.
 */
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
		      unsigned origin_only, unsigned exclusive,
		      int error_if_not_active)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* origin_only is meaningless unless the LV really is an origin */
	if (!lv_is_origin(lv))
		origin_only = 0;

	if (test_mode()) {
		_skip("Resuming %s%s.", lv->name, origin_only ? " without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || !info.suspended) {
		if (error_if_not_active)
			goto_out;
		r = 1;
		goto out;
	}

	/*
	 * When targets are activated exclusively in a cluster, the
	 * non-clustered target should be used. This only happens
	 * if ACTIVATE_EXCL is set in lv->status.
	 */
	if (exclusive)
		lv->status |= ACTIVATE_EXCL;

	if (!_lv_activate_lv(lv, origin_only))
		goto_out;

	/* Balances the critical_section_inc() done at suspend time */
	critical_section_dec(cmd);

	if (!monitor_dev_for_events(cmd, lv, origin_only, 1))
		stack;

	r = 1;
out:
	if (lv)
		free_vg(lv->vg);

	return r;
}
1225
/* Returns success if the device is not active */
/* Resume the LV identified by lvid_s; no-op success if not suspended. */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive)
{
	return _lv_resume(cmd, lvid_s, origin_only, exclusive, 0);
}
1232
/* Resume the LV identified by lvid_s; it is an error if it is not suspended. */
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return _lv_resume(cmd, lvid_s, origin_only, 0, 1);
}
1237
1238 static int _lv_has_open_snapshots(struct logical_volume *lv)
1239 {
1240 struct lv_segment *snap_seg;
1241 struct lvinfo info;
1242 int r = 0;
1243
1244 dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
1245 if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
1246 r = 1;
1247 continue;
1248 }
1249
1250 if (info.exists && info.open_count) {
1251 log_error("LV %s/%s has open snapshot %s: "
1252 "not deactivating", lv->vg->name, lv->name,
1253 snap_seg->cow->name);
1254 r = 1;
1255 }
1256 }
1257
1258 return r;
1259 }
1260
/*
 * Deactivate (remove from the kernel) the LV identified by lvid_s.
 * Refuses if the LV or any of its snapshots is open.
 * Returns 1 on success (including when already inactive), 0 on failure.
 */
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	/* Already inactive: nothing to do */
	if (!info.exists) {
		r = 1;
		goto out;
	}

	if (lv_is_visible(lv)) {
		if (info.open_count) {
			log_error("LV %s/%s in use: not deactivating",
				  lv->vg->name, lv->name);
			goto out;
		}
		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/* Stop monitoring before tearing the device down */
	if (!monitor_dev_for_events(cmd, lv, 0, 0))
		stack;

	critical_section_inc(cmd);
	r = _lv_deactivate(lv);
	critical_section_dec(cmd);

	/* Verify the device is really gone */
	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
1319
1320 /* Test if LV passes filter */
1321 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
1322 int *activate_lv)
1323 {
1324 struct logical_volume *lv;
1325 int r = 0;
1326
1327 if (!activation()) {
1328 *activate_lv = 1;
1329 return 1;
1330 }
1331
1332 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1333 goto out;
1334
1335 if (!_passes_activation_filter(cmd, lv)) {
1336 log_verbose("Not activating %s/%s since it does not pass "
1337 "activation filter.", lv->vg->name, lv->name);
1338 *activate_lv = 0;
1339 } else
1340 *activate_lv = 1;
1341 r = 1;
1342 out:
1343 if (lv)
1344 free_vg(lv->vg);
1345
1346 return r;
1347 }
1348
/*
 * Common activation path shared by lv_activate() and
 * lv_activate_with_filter().  Looks the LV up by lvid_s, optionally
 * applies the activation filter, and activates the device inside a
 * critical section.  Returns 1 on success (or when nothing needed
 * doing), 0 on failure.  The VG looked up for the lvid is released
 * before returning.
 */
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			int exclusive, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	/* Partial LVs may only be activated with --partial. */
	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	/* An unrecognised segment type cannot be mapped safely. */
	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	/* Already active with a live table and not suspended: done. */
	if (info.exists && !info.suspended && info.live_table) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * ACTIVATE_EXCL selects the non-clustered target variant - see
	 * the matching comment in the resume path above.
	 */
	if (exclusive)
		lv->status |= ACTIVATE_EXCL;

	critical_section_inc(cmd);
	if (!(r = _lv_activate_lv(lv, 0)))
		stack;
	critical_section_dec(cmd);

	/* A monitoring failure is logged but does not fail the activation. */
	if (r && !monitor_dev_for_events(cmd, lv, 0, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
1418
1419 /* Activate LV */
1420 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1421 {
1422 if (!_lv_activate(cmd, lvid_s, exclusive, 0))
1423 return_0;
1424
1425 return 1;
1426 }
1427
1428 /* Activate LV only if it passes filter */
1429 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1430 {
1431 if (!_lv_activate(cmd, lvid_s, exclusive, 1))
1432 return_0;
1433
1434 return 1;
1435 }
1436
1437 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
1438 {
1439 int r = 1;
1440
1441 if (!lv) {
1442 r = dm_mknodes(NULL);
1443 fs_unlock();
1444 return r;
1445 }
1446
1447 if (!activation())
1448 return 1;
1449
1450 r = dev_manager_mknodes(lv);
1451
1452 fs_unlock();
1453
1454 return r;
1455 }
1456
1457 /*
1458 * Does PV use VG somewhere in its construction?
1459 * Returns 1 on failure.
1460 */
1461 int pv_uses_vg(struct physical_volume *pv,
1462 struct volume_group *vg)
1463 {
1464 if (!activation())
1465 return 0;
1466
1467 if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
1468 return 0;
1469
1470 return dev_manager_device_uses_vg(pv->dev, vg);
1471 }
1472
/* Release resources held by the dev_manager layer. */
void activation_release(void)
{
	dev_manager_release();
}
1477
/* Final dev_manager shutdown - called when activation is finished with. */
void activation_exit(void)
{
	dev_manager_exit();
}
1482 #endif
This page took 0.102255 seconds and 6 git commands to generate.