]> sourceware.org Git - lvm2.git/blob - lib/cache/lvmcache.c
thin: fix recent commits
[lvm2.git] / lib / cache / lvmcache.c
1 /*
2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
4 *
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
10 *
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 */
15
16 #include "lib.h"
17 #include "lvmcache.h"
18 #include "toolcontext.h"
19 #include "dev-cache.h"
20 #include "locking.h"
21 #include "metadata.h"
22 #include "filter.h"
23 #include "filter-persistent.h"
24 #include "memlock.h"
25 #include "str_list.h"
26 #include "format-text.h"
27 #include "format_pool.h"
28 #include "format1.h"
29 #include "config.h"
30
31 #include "lvmetad.h"
32
/* lvmcache_info status flags */
#define CACHE_INVALID	0x00000001	/* PV label must be reread from disk (see _rescan_entry) */
#define CACHE_LOCKED	0x00000002	/* Cached while the owning VG lock was held */
35
/* One per device */
struct lvmcache_info {
	struct dm_list list;	/* Join VG members together */
	struct dm_list mdas;	/* list head for metadata areas */
	struct dm_list das;	/* list head for data areas */
	struct lvmcache_vginfo *vginfo;	/* NULL == unknown */
	struct label *label;
	const struct format_type *fmt;
	struct device *dev;
	uint64_t device_size;	/* Bytes */
	uint32_t status;	/* CACHE_INVALID | CACHE_LOCKED */
};
48
/* One per VG */
struct lvmcache_vginfo {
	struct dm_list list;	/* Join these vginfos together */
	struct dm_list infos;	/* List head for lvmcache_infos */
	const struct format_type *fmt;
	char *vgname;		/* "" == orphan */
	uint32_t status;
	char vgid[ID_LEN + 1];	/* NUL-terminated copy of the VG uuid */
	char _padding[7];
	struct lvmcache_vginfo *next;	/* Another VG with same name? */
	char *creation_host;
	size_t vgmetadata_size;
	char *vgmetadata;	/* Copy of VG metadata as format_text string */
	struct dm_config_tree *cft;	/* Config tree created from vgmetadata */
					/* Lifetime is directly tied to vgmetadata */
	struct volume_group *cached_vg;	/* VG struct imported from cft; NULL until first use */
	unsigned holders;	/* References to cached_vg outstanding */
	unsigned vg_use_count;	/* Counter of vg reusage */
	unsigned precommitted;	/* Is vgmetadata live or precommitted? */
};
69
static struct dm_hash_table *_pvid_hash = NULL;		/* pvid -> lvmcache_info */
static struct dm_hash_table *_vgid_hash = NULL;		/* vgid -> lvmcache_vginfo */
static struct dm_hash_table *_vgname_hash = NULL;	/* vgname -> lvmcache_vginfo (head of 'next' chain) */
static struct dm_hash_table *_lock_hash = NULL;		/* vgname -> (void*)1 while locked */
static DM_LIST_INIT(_vginfos);				/* All lvmcache_vginfo structs */
static int _scanning_in_progress = 0;
static int _has_scanned = 0;
static int _vgs_locked = 0;				/* Count of locked non-global VGs */
static int _vg_global_lock_held = 0;	/* Global lock held when cache wiped? */
79
80 int lvmcache_init(void)
81 {
82 /*
83 * FIXME add a proper lvmcache_locking_reset() that
84 * resets the cache so no previous locks are locked
85 */
86 _vgs_locked = 0;
87
88 dm_list_init(&_vginfos);
89
90 if (!(_vgname_hash = dm_hash_create(128)))
91 return 0;
92
93 if (!(_vgid_hash = dm_hash_create(128)))
94 return 0;
95
96 if (!(_pvid_hash = dm_hash_create(128)))
97 return 0;
98
99 if (!(_lock_hash = dm_hash_create(128)))
100 return 0;
101
102 /*
103 * Reinitialising the cache clears the internal record of
104 * which locks are held. The global lock can be held during
105 * this operation so its state must be restored afterwards.
106 */
107 if (_vg_global_lock_held) {
108 lvmcache_lock_vgname(VG_GLOBAL, 0);
109 _vg_global_lock_held = 0;
110 }
111
112 lvmetad_init();
113
114 return 1;
115 }
116
117 void lvmcache_seed_infos_from_lvmetad(struct cmd_context *cmd)
118 {
119 if (!lvmetad_active() || _has_scanned)
120 return;
121
122 if (!lvmetad_pv_list_to_lvmcache(cmd)) {
123 stack;
124 return;
125 }
126
127 _has_scanned = 1;
128 }
129
130 /* Volume Group metadata cache functions */
131 static void _free_cached_vgmetadata(struct lvmcache_vginfo *vginfo)
132 {
133 if (!vginfo || !vginfo->vgmetadata)
134 return;
135
136 dm_free(vginfo->vgmetadata);
137
138 vginfo->vgmetadata = NULL;
139
140 /* Release also cached config tree */
141 if (vginfo->cft) {
142 dm_config_destroy(vginfo->cft);
143 vginfo->cft = NULL;
144 }
145
146 log_debug("Metadata cache: VG %s wiped.", vginfo->vgname);
147
148 release_vg(vginfo->cached_vg);
149 }
150
151 /*
152 * Cache VG metadata against the vginfo with matching vgid.
153 */
154 static void _store_metadata(struct volume_group *vg, unsigned precommitted)
155 {
156 char uuid[64] __attribute__((aligned(8)));
157 struct lvmcache_vginfo *vginfo;
158 char *data;
159 size_t size;
160
161 if (!(vginfo = lvmcache_vginfo_from_vgid((const char *)&vg->id))) {
162 stack;
163 return;
164 }
165
166 if (!(size = export_vg_to_buffer(vg, &data))) {
167 stack;
168 _free_cached_vgmetadata(vginfo);
169 return;
170 }
171
172 /* Avoid reparsing of the same data string */
173 if (vginfo->vgmetadata && vginfo->vgmetadata_size == size &&
174 strcmp(vginfo->vgmetadata, data) == 0)
175 dm_free(data);
176 else {
177 _free_cached_vgmetadata(vginfo);
178 vginfo->vgmetadata_size = size;
179 vginfo->vgmetadata = data;
180 }
181
182 vginfo->precommitted = precommitted;
183
184 if (!id_write_format((const struct id *)vginfo->vgid, uuid, sizeof(uuid))) {
185 stack;
186 return;
187 }
188
189 log_debug("Metadata cache: VG %s (%s) stored (%" PRIsize_t " bytes%s).",
190 vginfo->vgname, uuid, size,
191 precommitted ? ", precommitted" : "");
192 }
193
194 static void _update_cache_info_lock_state(struct lvmcache_info *info,
195 int locked,
196 int *cached_vgmetadata_valid)
197 {
198 int was_locked = (info->status & CACHE_LOCKED) ? 1 : 0;
199
200 /*
201 * Cache becomes invalid whenever lock state changes unless
202 * exclusive VG_GLOBAL is held (i.e. while scanning).
203 */
204 if (!lvmcache_vgname_is_locked(VG_GLOBAL) && (was_locked != locked)) {
205 info->status |= CACHE_INVALID;
206 *cached_vgmetadata_valid = 0;
207 }
208
209 if (locked)
210 info->status |= CACHE_LOCKED;
211 else
212 info->status &= ~CACHE_LOCKED;
213 }
214
215 static void _update_cache_vginfo_lock_state(struct lvmcache_vginfo *vginfo,
216 int locked)
217 {
218 struct lvmcache_info *info;
219 int cached_vgmetadata_valid = 1;
220
221 dm_list_iterate_items(info, &vginfo->infos)
222 _update_cache_info_lock_state(info, locked,
223 &cached_vgmetadata_valid);
224
225 if (!cached_vgmetadata_valid)
226 _free_cached_vgmetadata(vginfo);
227 }
228
229 static void _update_cache_lock_state(const char *vgname, int locked)
230 {
231 struct lvmcache_vginfo *vginfo;
232
233 if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
234 return;
235
236 _update_cache_vginfo_lock_state(vginfo, locked);
237 }
238
/*
 * Drop cached metadata for one VG and mark its PV labels invalid.
 * drop_precommitted also resets the precommitted flag (VG revert path).
 */
static void _drop_metadata(const char *vgname, int drop_precommitted)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
		return;

	/*
	 * Invalidate cached PV labels.
	 * If cached precommitted metadata exists that means we
	 * already invalidated the PV labels (before caching it)
	 * and we must not do it again.
	 */
	/* Precommitted flag set with no metadata cached => a commit/revert was skipped. */
	if (!drop_precommitted && vginfo->precommitted && !vginfo->vgmetadata)
		log_error(INTERNAL_ERROR "metadata commit (or revert) missing before "
			  "dropping metadata from cache.");

	if (drop_precommitted || !vginfo->precommitted)
		dm_list_iterate_items(info, &vginfo->infos)
			info->status |= CACHE_INVALID;

	_free_cached_vgmetadata(vginfo);

	/* VG revert */
	if (drop_precommitted)
		vginfo->precommitted = 0;
}
267
268 /*
269 * Remote node uses this to upgrade precommited metadata to commited state
270 * when receives vg_commit notification.
271 * (Note that devices can be suspended here, if so, precommited metadata are already read.)
272 */
273 void lvmcache_commit_metadata(const char *vgname)
274 {
275 struct lvmcache_vginfo *vginfo;
276
277 if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
278 return;
279
280 if (vginfo->precommitted) {
281 log_debug("Precommitted metadata cache: VG %s upgraded to committed.",
282 vginfo->vgname);
283 vginfo->precommitted = 0;
284 }
285 }
286
287 void lvmcache_drop_metadata(const char *vgname, int drop_precommitted)
288 {
289 /* For VG_ORPHANS, we need to invalidate all labels on orphan PVs. */
290 if (!strcmp(vgname, VG_ORPHANS)) {
291 _drop_metadata(FMT_TEXT_ORPHAN_VG_NAME, 0);
292 _drop_metadata(FMT_LVM1_ORPHAN_VG_NAME, 0);
293 _drop_metadata(FMT_POOL_ORPHAN_VG_NAME, 0);
294
295 /* Indicate that PVs could now be missing from the cache */
296 init_full_scan_done(0);
297 } else if (!lvmcache_vgname_is_locked(VG_GLOBAL))
298 _drop_metadata(vgname, drop_precommitted);
299 }
300
/*
 * Ensure vgname2 comes after vgname1 alphabetically.
 * Orphan locks come last.
 * VG_GLOBAL comes first.
 */
static int _vgname_order_correct(const char *vgname1, const char *vgname2)
{
	/* VG_GLOBAL sorts before everything. */
	if (is_global_vg(vgname1))
		return 1;
	if (is_global_vg(vgname2))
		return 0;

	/* Orphans sort after everything else. */
	if (is_orphan_vg(vgname1))
		return 0;
	if (is_orphan_vg(vgname2))
		return 1;

	/* Otherwise plain alphabetical order. */
	return (strcmp(vgname1, vgname2) < 0) ? 1 : 0;
}
325
326 /*
327 * Ensure VG locks are acquired in alphabetical order.
328 */
329 int lvmcache_verify_lock_order(const char *vgname)
330 {
331 struct dm_hash_node *n;
332 const char *vgname2;
333
334 if (!_lock_hash)
335 return_0;
336
337 dm_hash_iterate(n, _lock_hash) {
338 if (!dm_hash_get_data(_lock_hash, n))
339 return_0;
340
341 if (!(vgname2 = dm_hash_get_key(_lock_hash, n))) {
342 log_error(INTERNAL_ERROR "VG lock %s hits NULL.",
343 vgname);
344 return 0;
345 }
346
347 if (!_vgname_order_correct(vgname2, vgname)) {
348 log_errno(EDEADLK, INTERNAL_ERROR "VG lock %s must "
349 "be requested before %s, not after.",
350 vgname, vgname2);
351 return 0;
352 }
353 }
354
355 return 1;
356 }
357
358 void lvmcache_lock_vgname(const char *vgname, int read_only __attribute__((unused)))
359 {
360 if (!_lock_hash && !lvmcache_init()) {
361 log_error("Internal cache initialisation failed");
362 return;
363 }
364
365 if (dm_hash_lookup(_lock_hash, vgname))
366 log_error(INTERNAL_ERROR "Nested locking attempted on VG %s.",
367 vgname);
368
369 if (!dm_hash_insert(_lock_hash, vgname, (void *) 1))
370 log_error("Cache locking failure for %s", vgname);
371
372 _update_cache_lock_state(vgname, 1);
373
374 if (strcmp(vgname, VG_GLOBAL))
375 _vgs_locked++;
376 }
377
378 int lvmcache_vgname_is_locked(const char *vgname)
379 {
380 if (!_lock_hash)
381 return 0;
382
383 return dm_hash_lookup(_lock_hash, is_orphan_vg(vgname) ? VG_ORPHANS : vgname) ? 1 : 0;
384 }
385
386 void lvmcache_unlock_vgname(const char *vgname)
387 {
388 if (!dm_hash_lookup(_lock_hash, vgname))
389 log_error(INTERNAL_ERROR "Attempt to unlock unlocked VG %s.",
390 vgname);
391
392 _update_cache_lock_state(vgname, 0);
393
394 dm_hash_remove(_lock_hash, vgname);
395
396 /* FIXME Do this per-VG */
397 if (strcmp(vgname, VG_GLOBAL) && !--_vgs_locked)
398 dev_close_all();
399 }
400
/* Number of non-global VG locks currently held. */
int lvmcache_vgs_locked(void)
{
	return _vgs_locked;
}
405
406 static void _vginfo_attach_info(struct lvmcache_vginfo *vginfo,
407 struct lvmcache_info *info)
408 {
409 if (!vginfo)
410 return;
411
412 info->vginfo = vginfo;
413 dm_list_add(&vginfo->infos, &info->list);
414 }
415
/* Unlink a PV's info from its vginfo's list and clear the back-pointer. */
static void _vginfo_detach_info(struct lvmcache_info *info)
{
	if (!dm_list_empty(&info->list)) {
		dm_list_del(&info->list);
		dm_list_init(&info->list);
	}

	info->vginfo = NULL;
}
425
426 /* If vgid supplied, require a match. */
427 struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname, const char *vgid)
428 {
429 struct lvmcache_vginfo *vginfo;
430
431 if (!vgname)
432 return lvmcache_vginfo_from_vgid(vgid);
433
434 if (!_vgname_hash)
435 return NULL;
436
437 if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname)))
438 return NULL;
439
440 if (vgid)
441 do
442 if (!strncmp(vgid, vginfo->vgid, ID_LEN))
443 return vginfo;
444 while ((vginfo = vginfo->next));
445
446 return vginfo;
447 }
448
449 const struct format_type *lvmcache_fmt_from_vgname(struct cmd_context *cmd,
450 const char *vgname, const char *vgid,
451 unsigned revalidate_labels)
452 {
453 struct lvmcache_vginfo *vginfo;
454 struct lvmcache_info *info;
455 struct label *label;
456 struct dm_list *devh, *tmp;
457 struct dm_list devs;
458 struct device_list *devl;
459 struct volume_group *vg;
460 const struct format_type *fmt;
461 char vgid_found[ID_LEN + 1] __attribute__((aligned(8)));
462
463 if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
464 if (!lvmetad_active())
465 return NULL; /* too bad */
466 /* If we don't have the info but we have lvmetad, we can ask
467 * there before failing. */
468 if ((vg = lvmetad_vg_lookup(cmd, vgname, vgid))) {
469 fmt = vg->fid->fmt;
470 release_vg(vg);
471 return fmt;
472 }
473 return NULL;
474 }
475
476 /*
477 * If this function is called repeatedly, only the first one needs to revalidate.
478 */
479 if (!revalidate_labels)
480 goto out;
481
482 /*
483 * This function is normally called before reading metadata so
484 * we check cached labels here. Unfortunately vginfo is volatile.
485 */
486 dm_list_init(&devs);
487 dm_list_iterate_items(info, &vginfo->infos) {
488 if (!(devl = dm_malloc(sizeof(*devl)))) {
489 log_error("device_list element allocation failed");
490 return NULL;
491 }
492 devl->dev = info->dev;
493 dm_list_add(&devs, &devl->list);
494 }
495
496 memcpy(vgid_found, vginfo->vgid, sizeof(vgid_found));
497
498 dm_list_iterate_safe(devh, tmp, &devs) {
499 devl = dm_list_item(devh, struct device_list);
500 (void) label_read(devl->dev, &label, UINT64_C(0));
501 dm_list_del(&devl->list);
502 dm_free(devl);
503 }
504
505 /* If vginfo changed, caller needs to rescan */
506 if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid_found)) ||
507 strncmp(vginfo->vgid, vgid_found, ID_LEN))
508 return NULL;
509
510 out:
511 return vginfo->fmt;
512 }
513
514 struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid)
515 {
516 struct lvmcache_vginfo *vginfo;
517 char id[ID_LEN + 1] __attribute__((aligned(8)));
518
519 if (!_vgid_hash || !vgid)
520 return NULL;
521
522 /* vgid not necessarily NULL-terminated */
523 strncpy(&id[0], vgid, ID_LEN);
524 id[ID_LEN] = '\0';
525
526 if (!(vginfo = dm_hash_lookup(_vgid_hash, id)))
527 return NULL;
528
529 return vginfo;
530 }
531
532 const char *lvmcache_vgname_from_vgid(struct dm_pool *mem, const char *vgid)
533 {
534 struct lvmcache_vginfo *vginfo;
535 const char *vgname = NULL;
536
537 if ((vginfo = lvmcache_vginfo_from_vgid(vgid)))
538 vgname = vginfo->vgname;
539
540 if (mem && vgname)
541 return dm_pool_strdup(mem, vgname);
542
543 return vgname;
544 }
545
/*
 * May the cached data for this PV still be trusted?
 * Invalid if flagged CACHE_INVALID; otherwise valid when the owning VG
 * is not locked by us, or when the data was cached under the lock
 * (CACHE_LOCKED set).
 */
static int _info_is_valid(struct lvmcache_info *info)
{
	if (info->status & CACHE_INVALID)
		return 0;

	/*
	 * The caller must hold the VG lock to manipulate metadata.
	 * In a cluster, remote nodes sometimes read metadata in the
	 * knowledge that the controlling node is holding the lock.
	 * So if the VG appears to be unlocked here, it should be safe
	 * to use the cached value.
	 */
	if (info->vginfo && !lvmcache_vgname_is_locked(info->vginfo->vgname))
		return 1;

	/* VG is locked: only data cached while locked is trustworthy. */
	if (!(info->status & CACHE_LOCKED))
		return 0;

	return 1;
}
566
567 static int _vginfo_is_valid(struct lvmcache_vginfo *vginfo)
568 {
569 struct lvmcache_info *info;
570
571 /* Invalid if any info is invalid */
572 dm_list_iterate_items(info, &vginfo->infos)
573 if (!_info_is_valid(info))
574 return 0;
575
576 return 1;
577 }
578
579 /* vginfo is invalid if it does not contain at least one valid info */
580 static int _vginfo_is_invalid(struct lvmcache_vginfo *vginfo)
581 {
582 struct lvmcache_info *info;
583
584 dm_list_iterate_items(info, &vginfo->infos)
585 if (_info_is_valid(info))
586 return 0;
587
588 return 1;
589 }
590
591 /*
592 * If valid_only is set, data will only be returned if the cached data is
593 * known still to be valid.
594 */
595 struct lvmcache_info *lvmcache_info_from_pvid(const char *pvid, int valid_only)
596 {
597 struct lvmcache_info *info;
598 char id[ID_LEN + 1] __attribute__((aligned(8)));
599
600 if (!_pvid_hash || !pvid)
601 return NULL;
602
603 strncpy(&id[0], pvid, ID_LEN);
604 id[ID_LEN] = '\0';
605
606 if (!(info = dm_hash_lookup(_pvid_hash, id)))
607 return NULL;
608
609 if (valid_only && !_info_is_valid(info))
610 return NULL;
611
612 return info;
613 }
614
615 const char *lvmcache_vgname_from_info(struct lvmcache_info *info)
616 {
617 if (info->vginfo)
618 return info->vginfo->vgname;
619 return NULL;
620 }
621
622 char *lvmcache_vgname_from_pvid(struct cmd_context *cmd, const char *pvid)
623 {
624 struct lvmcache_info *info;
625 char *vgname;
626
627 if (!lvmcache_device_from_pvid(cmd, (const struct id *)pvid, NULL, NULL)) {
628 log_error("Couldn't find device with uuid %s.", pvid);
629 return NULL;
630 }
631
632 info = lvmcache_info_from_pvid(pvid, 0);
633 if (!info)
634 return_NULL;
635
636 if (!(vgname = dm_pool_strdup(cmd->mem, info->vginfo->vgname))) {
637 log_errno(ENOMEM, "vgname allocation failed");
638 return NULL;
639 }
640 return vgname;
641 }
642
/* Reread the PV label from disk if this entry was marked invalid. */
static void _rescan_entry(struct lvmcache_info *info)
{
	struct label *label;

	if (info->status & CACHE_INVALID)
		(void) label_read(info->dev, &label, UINT64_C(0));
}
650
/* Rescan only the cached PVs flagged CACHE_INVALID.  Always returns 1. */
static int _scan_invalid(void)
{
	dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _rescan_entry);

	return 1;
}
657
/*
 * Scan device labels into lvmcache.
 *
 * full_scan: 0 = after an initial scan, only rescan invalid entries;
 * nonzero = scan all devices; 2 = additionally refresh unused filters,
 * pass a nonzero flag to dev_iter_create and dump the persistent filter
 * (exact dev_iter flag semantics live in dev-cache - confirm there).
 *
 * Returns 1 on success (or when lvmetad supplies the data), 0 on failure
 * or when a scan is already in progress.
 */
int lvmcache_label_scan(struct cmd_context *cmd, int full_scan)
{
	struct label *label;
	struct dev_iter *iter;
	struct device *dev;
	struct format_type *fmt;

	int r = 0;

	/* lvmetad keeps the cache up to date - no scan needed. */
	if (lvmetad_active())
		return 1;

	/* Avoid recursion when a PVID can't be found! */
	if (_scanning_in_progress)
		return 0;

	_scanning_in_progress = 1;

	if (!_vgname_hash && !lvmcache_init()) {
		log_error("Internal cache initialisation failed");
		goto out;
	}

	if (_has_scanned && !full_scan) {
		r = _scan_invalid();
		goto out;
	}

	if (full_scan == 2 && (cmd->filter && !cmd->filter->use_count) && !refresh_filters(cmd))
		goto_out;

	if (!cmd->filter || !(iter = dev_iter_create(cmd->filter, (full_scan == 2) ? 1 : 0))) {
		log_error("dev_iter creation failed");
		goto out;
	}

	while ((dev = dev_iter_get(iter)))
		(void) label_read(dev, &label, UINT64_C(0));

	dev_iter_destroy(iter);

	_has_scanned = 1;

	/* Perform any format-specific scanning e.g. text files */
	if (cmd->independent_metadata_areas)
		dm_list_iterate_items(fmt, &cmd->formats)
			if (fmt->ops->scan && !fmt->ops->scan(fmt, NULL))
				goto out;

	/*
	 * If we are a long-lived process, write out the updated persistent
	 * device cache for the benefit of short-lived processes.
	 */
	if (full_scan == 2 && cmd->is_long_lived && cmd->dump_filter)
		persistent_filter_dump(cmd->filter, 0);

	r = 1;

      out:
	_scanning_in_progress = 0;

	return r;
}
721
/*
 * Return a volume_group built from (or already cached against) the
 * vginfo matching vgid, or delegate to lvmetad for live metadata.
 * Each successful return increments vginfo->holders for the reference
 * handed out (a freshly imported VG therefore ends up with holders == 2:
 * one for vginfo->cached_vg itself plus the returned reference -
 * released via lvmcache_vginfo_holders_dec_and_test_for_zero()).
 * Returns NULL when nothing valid is cached or import fails.
 */
struct volume_group *lvmcache_get_vg(struct cmd_context *cmd, const char *vgname,
				     const char *vgid, unsigned precommitted)
{
	struct lvmcache_vginfo *vginfo;
	struct volume_group *vg = NULL;
	struct format_instance *fid;
	struct format_instance_ctx fic;

	/*
	 * We currently do not store precommitted metadata in lvmetad at
	 * all. This means that any request for precommitted metadata is served
	 * using the classic scanning mechanics, and read from disk or from
	 * lvmcache.
	 */
	if (lvmetad_active() && !precommitted) {
		/* Still serve the locally cached VG if available */
		if (vgid && (vginfo = lvmcache_vginfo_from_vgid(vgid)) &&
		    vginfo->vgmetadata && (vg = vginfo->cached_vg))
			goto out;
		return lvmetad_vg_lookup(cmd, vgname, vgid);
	}

	if (!vgid || !(vginfo = lvmcache_vginfo_from_vgid(vgid)) || !vginfo->vgmetadata)
		return NULL;

	if (!_vginfo_is_valid(vginfo))
		return NULL;

	/*
	 * Don't return cached data if either:
	 * (i) precommitted metadata is requested but we don't have it cached
	 *     - caller should read it off disk;
	 * (ii) live metadata is requested but we have precommitted metadata cached
	 *     and no devices are suspended so caller may read it off disk.
	 *
	 * If live metadata is requested but we have precommitted metadata cached
	 * and devices are suspended, we assume this precommitted metadata has
	 * already been preloaded and committed so it's OK to return it as live.
	 * Note that we do not clear the PRECOMMITTED flag.
	 */
	if ((precommitted && !vginfo->precommitted) ||
	    (!precommitted && vginfo->precommitted && !critical_section()))
		return NULL;

	/* Use already-cached VG struct when available */
	if ((vg = vginfo->cached_vg))
		goto out;

	fic.type = FMT_INSTANCE_MDAS | FMT_INSTANCE_AUX_MDAS;
	fic.context.vg_ref.vg_name = vginfo->vgname;
	fic.context.vg_ref.vg_id = vgid;
	if (!(fid = vginfo->fmt->ops->create_instance(vginfo->fmt, &fic)))
		return_NULL;

	/* Build config tree from vgmetadata, if not yet cached */
	if (!vginfo->cft &&
	    !(vginfo->cft =
	      dm_config_from_string(vginfo->vgmetadata)))
		goto_bad;

	if (!(vg = import_vg_from_config_tree(vginfo->cft, fid)))
		goto_bad;

	/* Cache VG struct for reuse */
	vginfo->cached_vg = vg;
	vginfo->holders = 1;
	vginfo->vg_use_count = 0;
	vg->vginfo = vginfo;

	/* Lock the pool so later corruption of the cached VG can be detected. */
	if (!dm_pool_lock(vg->vgmem, detect_internal_vg_cache_corruption()))
		goto_bad;

out:
	vginfo->holders++;
	vginfo->vg_use_count++;
	log_debug("Using cached %smetadata for VG %s with %u holder(s).",
		  vginfo->precommitted ? "pre-committed " : "",
		  vginfo->vgname, vginfo->holders);

	return vg;

bad:
	_free_cached_vgmetadata(vginfo);
	return NULL;
}
807
808 // #if 0
809 int lvmcache_vginfo_holders_dec_and_test_for_zero(struct lvmcache_vginfo *vginfo)
810 {
811 log_debug("VG %s decrementing %d holder(s) at %p.",
812 vginfo->cached_vg->name, vginfo->holders, vginfo->cached_vg);
813
814 if (--vginfo->holders)
815 return 0;
816
817 if (vginfo->vg_use_count > 1)
818 log_debug("VG %s reused %d times.",
819 vginfo->cached_vg->name, vginfo->vg_use_count);
820
821 /* Debug perform crc check only when it's been used more then once */
822 if (!dm_pool_unlock(vginfo->cached_vg->vgmem,
823 detect_internal_vg_cache_corruption() &&
824 (vginfo->vg_use_count > 1)))
825 stack;
826
827 vginfo->cached_vg->vginfo = NULL;
828 vginfo->cached_vg = NULL;
829
830 return 1;
831 }
832 // #endif
833
834 struct dm_list *lvmcache_get_vgids(struct cmd_context *cmd,
835 int include_internal)
836 {
837 struct dm_list *vgids;
838 struct lvmcache_vginfo *vginfo;
839
840 // TODO plug into lvmetad here automagically?
841 lvmcache_label_scan(cmd, 0);
842
843 if (!(vgids = str_list_create(cmd->mem))) {
844 log_error("vgids list allocation failed");
845 return NULL;
846 }
847
848 dm_list_iterate_items(vginfo, &_vginfos) {
849 if (!include_internal && is_orphan_vg(vginfo->vgname))
850 continue;
851
852 if (!str_list_add(cmd->mem, vgids,
853 dm_pool_strdup(cmd->mem, vginfo->vgid))) {
854 log_error("strlist allocation failed");
855 return NULL;
856 }
857 }
858
859 return vgids;
860 }
861
862 struct dm_list *lvmcache_get_vgnames(struct cmd_context *cmd,
863 int include_internal)
864 {
865 struct dm_list *vgnames;
866 struct lvmcache_vginfo *vginfo;
867
868 lvmcache_label_scan(cmd, 0);
869
870 if (!(vgnames = str_list_create(cmd->mem))) {
871 log_errno(ENOMEM, "vgnames list allocation failed");
872 return NULL;
873 }
874
875 dm_list_iterate_items(vginfo, &_vginfos) {
876 if (!include_internal && is_orphan_vg(vginfo->vgname))
877 continue;
878
879 if (!str_list_add(cmd->mem, vgnames,
880 dm_pool_strdup(cmd->mem, vginfo->vgname))) {
881 log_errno(ENOMEM, "strlist allocation failed");
882 return NULL;
883 }
884 }
885
886 return vgnames;
887 }
888
889 struct dm_list *lvmcache_get_pvids(struct cmd_context *cmd, const char *vgname,
890 const char *vgid)
891 {
892 struct dm_list *pvids;
893 struct lvmcache_vginfo *vginfo;
894 struct lvmcache_info *info;
895
896 if (!(pvids = str_list_create(cmd->mem))) {
897 log_error("pvids list allocation failed");
898 return NULL;
899 }
900
901 if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid)))
902 return pvids;
903
904 dm_list_iterate_items(info, &vginfo->infos) {
905 if (!str_list_add(cmd->mem, pvids,
906 dm_pool_strdup(cmd->mem, info->dev->pvid))) {
907 log_error("strlist allocation failed");
908 return NULL;
909 }
910 }
911
912 return pvids;
913 }
914
/*
 * Map a PV uuid to its device using cached state only.
 * With lvmetad active the cached entry is trusted directly; otherwise the
 * on-disk label is reread to confirm the pvid still matches.
 * Returns NULL when the pvid is not cached or no longer matches the disk.
 */
static struct device *_device_from_pvid(const struct id *pvid,
					uint64_t *label_sector)
{
	struct lvmcache_info *info;
	struct label *label;

	if ((info = lvmcache_info_from_pvid((const char *) pvid, 0))) {
		/* lvmetad keeps the cache current - no reread needed. */
		if (lvmetad_active()) {
			if (info->label && label_sector)
				*label_sector = info->label->sector;
			return info->dev;
		}

		/* label_read refreshes info; verify the pvid still matches. */
		if (label_read(info->dev, &label, UINT64_C(0))) {
			info = (struct lvmcache_info *) label->info;
			if (id_equal(pvid, (struct id *) &info->dev->pvid)) {
				if (label_sector)
					*label_sector = label->sector;
				return info->dev;
			}
		}
	}
	return NULL;
}
939
940 struct device *lvmcache_device_from_pvid(struct cmd_context *cmd, const struct id *pvid,
941 unsigned *scan_done_once, uint64_t *label_sector)
942 {
943 struct device *dev;
944
945 /* Already cached ? */
946 dev = _device_from_pvid(pvid, label_sector);
947 if (dev)
948 return dev;
949
950 lvmcache_label_scan(cmd, 0);
951
952 /* Try again */
953 dev = _device_from_pvid(pvid, label_sector);
954 if (dev)
955 return dev;
956
957 if (critical_section() || (scan_done_once && *scan_done_once))
958 return NULL;
959
960 lvmcache_label_scan(cmd, 2);
961 if (scan_done_once)
962 *scan_done_once = 1;
963
964 /* Try again */
965 dev = _device_from_pvid(pvid, label_sector);
966 if (dev)
967 return dev;
968
969 return NULL;
970 }
971
972 const char *lvmcache_pvid_from_devname(struct cmd_context *cmd,
973 const char *devname)
974 {
975 struct device *dev;
976 struct label *label;
977
978 if (!(dev = dev_cache_get(devname, cmd->filter))) {
979 log_error("%s: Couldn't find device. Check your filters?",
980 devname);
981 return NULL;
982 }
983
984 if (!(label_read(dev, &label, UINT64_C(0))))
985 return NULL;
986
987 return dev->pvid;
988 }
989
990
/*
 * Remove vginfo from the vgname/vgid hashes and the _vginfos list, then
 * free it and its cached metadata.  Returns 0 only if re-inserting a
 * same-named alternative into _vgname_hash fails (vginfo is freed anyway).
 */
static int _free_vginfo(struct lvmcache_vginfo *vginfo)
{
	struct lvmcache_vginfo *primary_vginfo, *vginfo2;
	int r = 1;

	_free_cached_vgmetadata(vginfo);

	vginfo2 = primary_vginfo = lvmcache_vginfo_from_vgname(vginfo->vgname, NULL);

	if (vginfo == primary_vginfo) {
		/* Head of the name chain: promote the next duplicate, if any. */
		dm_hash_remove(_vgname_hash, vginfo->vgname);
		if (vginfo->next && !dm_hash_insert(_vgname_hash, vginfo->vgname,
						    vginfo->next)) {
			log_error("_vgname_hash re-insertion for %s failed",
				  vginfo->vgname);
			r = 0;
		}
	} else do
		/* NOTE(review): assumes vginfo is on the primary's chain -
		 * vginfo2 would be dereferenced while NULL otherwise; confirm
		 * all callers pass a registered vginfo. */
		if (vginfo2->next == vginfo) {
			vginfo2->next = vginfo->next;
			break;
		}
	while ((vginfo2 = vginfo2->next));

	dm_free(vginfo->vgname);
	dm_free(vginfo->creation_host);

	/* Only drop the vgid hash entry if it still points at us. */
	if (*vginfo->vgid && _vgid_hash &&
	    lvmcache_vginfo_from_vgid(vginfo->vgid) == vginfo)
		dm_hash_remove(_vgid_hash, vginfo->vgid);

	dm_list_del(&vginfo->list);

	dm_free(vginfo);

	return r;
}
1028
1029 /*
1030 * vginfo must be info->vginfo unless info is NULL
1031 */
1032 static int _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vginfo)
1033 {
1034 if (info)
1035 _vginfo_detach_info(info);
1036
1037 /* vginfo still referenced? */
1038 if (!vginfo || is_orphan_vg(vginfo->vgname) ||
1039 !dm_list_empty(&vginfo->infos))
1040 return 1;
1041
1042 if (!_free_vginfo(vginfo))
1043 return_0;
1044
1045 return 1;
1046 }
1047
1048 /* Unused
1049 void lvmcache_del(struct lvmcache_info *info)
1050 {
1051 if (info->dev->pvid[0] && _pvid_hash)
1052 dm_hash_remove(_pvid_hash, info->dev->pvid);
1053
1054 _drop_vginfo(info, info->vginfo);
1055
1056 info->label->labeller->ops->destroy_label(info->label->labeller,
1057 info->label);
1058 dm_free(info);
1059
1060 return;
1061 } */
1062
/*
 * Store/refresh the pvid -> info mapping in _pvid_hash and copy the pvid
 * into info->dev->pvid.  Returns 0 on hash insertion failure.
 */
static int _lvmcache_update_pvid(struct lvmcache_info *info, const char *pvid)
{
	/*
	 * Nothing to do if already stored with same pvid.
	 */

	if (((dm_hash_lookup(_pvid_hash, pvid)) == info) &&
	    !strcmp(info->dev->pvid, pvid))
		return 1;
	if (*info->dev->pvid)
		dm_hash_remove(_pvid_hash, info->dev->pvid);
	/* NOTE(review): strncpy leaves the buffer unterminated if pvid fills
	 * it completely - assumes pvid fits with room for the NUL; confirm. */
	strncpy(info->dev->pvid, pvid, sizeof(info->dev->pvid));
	if (!dm_hash_insert(_pvid_hash, pvid, info)) {
		log_error("_lvmcache_update: pvid insertion failed: %s", pvid);
		return 0;
	}

	return 1;
}
1082
1083 /*
1084 * vginfo must be info->vginfo unless info is NULL (orphans)
1085 */
1086 static int _lvmcache_update_vgid(struct lvmcache_info *info,
1087 struct lvmcache_vginfo *vginfo,
1088 const char *vgid)
1089 {
1090 if (!vgid || !vginfo ||
1091 !strncmp(vginfo->vgid, vgid, ID_LEN))
1092 return 1;
1093
1094 if (vginfo && *vginfo->vgid)
1095 dm_hash_remove(_vgid_hash, vginfo->vgid);
1096 if (!vgid) {
1097 /* FIXME: unreachable code path */
1098 log_debug("lvmcache: %s: clearing VGID", info ? dev_name(info->dev) : vginfo->vgname);
1099 return 1;
1100 }
1101
1102 strncpy(vginfo->vgid, vgid, ID_LEN);
1103 vginfo->vgid[ID_LEN] = '\0';
1104 if (!dm_hash_insert(_vgid_hash, vginfo->vgid, vginfo)) {
1105 log_error("_lvmcache_update: vgid hash insertion failed: %s",
1106 vginfo->vgid);
1107 return 0;
1108 }
1109
1110 if (!is_orphan_vg(vginfo->vgname))
1111 log_debug("lvmcache: %s: setting %s VGID to %s",
1112 (info) ? dev_name(info->dev) : "",
1113 vginfo->vgname, vginfo->vgid);
1114
1115 return 1;
1116 }
1117
/*
 * Insert new_vginfo into _vgname_hash, resolving duplicate VG names by
 * the precedence rules commented below.  The losing vginfo is kept on
 * the 'next' chain behind the winner.  Returns 0 on id formatting or
 * hash insertion failure.
 */
static int _insert_vginfo(struct lvmcache_vginfo *new_vginfo, const char *vgid,
			  uint32_t vgstatus, const char *creation_host,
			  struct lvmcache_vginfo *primary_vginfo)
{
	struct lvmcache_vginfo *last_vginfo = primary_vginfo;
	char uuid_primary[64] __attribute__((aligned(8)));
	char uuid_new[64] __attribute__((aligned(8)));
	int use_new = 0;

	/* Pre-existing VG takes precedence. Unexported VG takes precedence. */
	if (primary_vginfo) {
		if (!id_write_format((const struct id *)vgid, uuid_new, sizeof(uuid_new)))
			return_0;

		if (!id_write_format((const struct id *)&primary_vginfo->vgid, uuid_primary,
				     sizeof(uuid_primary)))
			return_0;

		/*
		 * If	Primary not exported, new exported => keep
		 * Else	Primary exported, new not exported => change
		 * Else	Primary has hostname for this machine => keep
		 * Else	Primary has no hostname, new has one => change
		 * Else	New has hostname for this machine => change
		 * Else	Keep primary.
		 */
		if (!(primary_vginfo->status & EXPORTED_VG) &&
		    (vgstatus & EXPORTED_VG))
			log_warn("WARNING: Duplicate VG name %s: "
				 "Existing %s takes precedence over "
				 "exported %s", new_vginfo->vgname,
				 uuid_primary, uuid_new);
		else if ((primary_vginfo->status & EXPORTED_VG) &&
			 !(vgstatus & EXPORTED_VG)) {
			log_warn("WARNING: Duplicate VG name %s: "
				 "%s takes precedence over exported %s",
				 new_vginfo->vgname, uuid_new,
				 uuid_primary);
			use_new = 1;
		} else if (primary_vginfo->creation_host &&
			   !strcmp(primary_vginfo->creation_host,
				   primary_vginfo->fmt->cmd->hostname))
			log_warn("WARNING: Duplicate VG name %s: "
				 "Existing %s (created here) takes precedence "
				 "over %s", new_vginfo->vgname, uuid_primary,
				 uuid_new);
		else if (!primary_vginfo->creation_host && creation_host) {
			log_warn("WARNING: Duplicate VG name %s: "
				 "%s (with creation_host) takes precedence over %s",
				 new_vginfo->vgname, uuid_new,
				 uuid_primary);
			use_new = 1;
		} else if (creation_host &&
			   !strcmp(creation_host,
				   primary_vginfo->fmt->cmd->hostname)) {
			log_warn("WARNING: Duplicate VG name %s: "
				 "%s (created here) takes precedence over %s",
				 new_vginfo->vgname, uuid_new,
				 uuid_primary);
			use_new = 1;
		}

		/* Loser goes to the end of the primary's chain. */
		if (!use_new) {
			while (last_vginfo->next)
				last_vginfo = last_vginfo->next;
			last_vginfo->next = new_vginfo;
			return 1;
		}

		/* New one wins: demote the old primary below. */
		dm_hash_remove(_vgname_hash, primary_vginfo->vgname);
	}

	if (!dm_hash_insert(_vgname_hash, new_vginfo->vgname, new_vginfo)) {
		log_error("cache_update: vg hash insertion failed: %s",
			  new_vginfo->vgname);
		return 0;
	}

	if (primary_vginfo)
		new_vginfo->next = primary_vginfo;

	return 1;
}
1201
/*
 * Move info into the vginfo for vgname, creating the vginfo first if it
 * does not exist yet.  With a NULL info, just ensures the vginfo exists
 * (used for orphan VGs).  Returns 1 on success, 0 on failure.
 */
static int _lvmcache_update_vgname(struct lvmcache_info *info,
				   const char *vgname, const char *vgid,
				   uint32_t vgstatus, const char *creation_host,
				   const struct format_type *fmt)
{
	struct lvmcache_vginfo *vginfo, *primary_vginfo, *orphan_vginfo;
	struct lvmcache_info *info2, *info3;
	char mdabuf[32];
	// struct lvmcache_vginfo *old_vginfo, *next;

	/* Nothing to do if no name given or info is already in that VG. */
	if (!vgname || (info && info->vginfo && !strcmp(info->vginfo->vgname, vgname)))
		return 1;

	/* Remove existing vginfo entry */
	if (info)
		_drop_vginfo(info, info->vginfo);

	/* Get existing vginfo or create new one */
	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
/*** FIXME - vginfo ends up duplicated instead of renamed.
		// Renaming? This lookup fails.
		if ((vginfo = vginfo_from_vgid(vgid))) {
			next = vginfo->next;
			old_vginfo = vginfo_from_vgname(vginfo->vgname, NULL);
			if (old_vginfo == vginfo) {
				dm_hash_remove(_vgname_hash, old_vginfo->vgname);
				if (old_vginfo->next) {
					if (!dm_hash_insert(_vgname_hash, old_vginfo->vgname, old_vginfo->next)) {
						log_error("vg hash re-insertion failed: %s",
							  old_vginfo->vgname);
						return 0;
					}
				}
			} else do {
				if (old_vginfo->next == vginfo) {
					old_vginfo->next = vginfo->next;
					break;
				}
			} while ((old_vginfo = old_vginfo->next));
			vginfo->next = NULL;

			dm_free(vginfo->vgname);
			if (!(vginfo->vgname = dm_strdup(vgname))) {
				log_error("cache vgname alloc failed for %s", vgname);
				return 0;
			}

			// Rename so can assume new name does not already exist
			if (!dm_hash_insert(_vgname_hash, vginfo->vgname, vginfo->next)) {
				log_error("vg hash re-insertion failed: %s",
					  vginfo->vgname);
				return 0;
			}
		} else {
***/
		if (!(vginfo = dm_zalloc(sizeof(*vginfo)))) {
			log_error("lvmcache_update_vgname: list alloc failed");
			return 0;
		}
		if (!(vginfo->vgname = dm_strdup(vgname))) {
			dm_free(vginfo);
			log_error("cache vgname alloc failed for %s", vgname);
			return 0;
		}
		dm_list_init(&vginfo->infos);

		/*
		 * If we're scanning and there's an invalidated entry, remove it.
		 * Otherwise we risk bogus warnings of duplicate VGs.
		 */
		while ((primary_vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)) &&
		       _scanning_in_progress && _vginfo_is_invalid(primary_vginfo)) {
			/* Park the stale entry's PVs in the orphan vginfo. */
			orphan_vginfo = lvmcache_vginfo_from_vgname(primary_vginfo->fmt->orphan_vg_name, NULL);
			if (!orphan_vginfo) {
				log_error(INTERNAL_ERROR "Orphan vginfo %s lost from cache.",
					  primary_vginfo->fmt->orphan_vg_name);
				dm_free(vginfo->vgname);
				dm_free(vginfo);
				return 0;
			}
			dm_list_iterate_items_safe(info2, info3, &primary_vginfo->infos) {
				_vginfo_detach_info(info2);
				_vginfo_attach_info(orphan_vginfo, info2);
				if (info2->mdas.n)
					sprintf(mdabuf, " with %u mdas",
						dm_list_size(&info2->mdas));
				else
					mdabuf[0] = '\0';
				log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
					  dev_name(info2->dev),
					  vgname, orphan_vginfo->vgid[0] ? " (" : "",
					  orphan_vginfo->vgid[0] ? orphan_vginfo->vgid : "",
					  orphan_vginfo->vgid[0] ? ")" : "", mdabuf);
			}

			if (!_drop_vginfo(NULL, primary_vginfo))
				return_0;
		}

		if (!_insert_vginfo(vginfo, vgid, vgstatus, creation_host,
				    primary_vginfo)) {
			dm_free(vginfo->vgname);
			dm_free(vginfo);
			return 0;
		}
		/* Ensure orphans appear last on list_iterate */
		if (is_orphan_vg(vgname))
			dm_list_add(&_vginfos, &vginfo->list);
		else
			dm_list_add_h(&_vginfos, &vginfo->list);
/***
	}
***/
	}

	if (info)
		_vginfo_attach_info(vginfo, info);
	else if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) /* Orphans */
		return_0;

	/* Mirror the VG's current lock state onto the (possibly new) vginfo. */
	_update_cache_vginfo_lock_state(vginfo, lvmcache_vgname_is_locked(vgname));

	/* FIXME Check consistency of list! */
	vginfo->fmt = fmt;

	if (info) {
		if (info->mdas.n)
			sprintf(mdabuf, " with %u mdas", dm_list_size(&info->mdas));
		else
			mdabuf[0] = '\0';
		log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
			  dev_name(info->dev),
			  vgname, vginfo->vgid[0] ? " (" : "",
			  vginfo->vgid[0] ? vginfo->vgid : "",
			  vginfo->vgid[0] ? ")" : "", mdabuf);
	} else
		log_debug("lvmcache: initialised VG %s", vgname);

	return 1;
}
1342
1343 static int _lvmcache_update_vgstatus(struct lvmcache_info *info, uint32_t vgstatus,
1344 const char *creation_host)
1345 {
1346 if (!info || !info->vginfo)
1347 return 1;
1348
1349 if ((info->vginfo->status & EXPORTED_VG) != (vgstatus & EXPORTED_VG))
1350 log_debug("lvmcache: %s: VG %s %s exported",
1351 dev_name(info->dev), info->vginfo->vgname,
1352 vgstatus & EXPORTED_VG ? "now" : "no longer");
1353
1354 info->vginfo->status = vgstatus;
1355
1356 if (!creation_host)
1357 return 1;
1358
1359 if (info->vginfo->creation_host && !strcmp(creation_host,
1360 info->vginfo->creation_host))
1361 return 1;
1362
1363 if (info->vginfo->creation_host)
1364 dm_free(info->vginfo->creation_host);
1365
1366 if (!(info->vginfo->creation_host = dm_strdup(creation_host))) {
1367 log_error("cache creation host alloc failed for %s",
1368 creation_host);
1369 return 0;
1370 }
1371
1372 log_debug("lvmcache: %s: VG %s: Set creation host to %s.",
1373 dev_name(info->dev), info->vginfo->vgname, creation_host);
1374
1375 return 1;
1376 }
1377
1378 int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt)
1379 {
1380 if (!_lock_hash && !lvmcache_init()) {
1381 log_error("Internal cache initialisation failed");
1382 return 0;
1383 }
1384
1385 return _lvmcache_update_vgname(NULL, vgname, vgname, 0, "", fmt);
1386 }
1387
1388 int lvmcache_update_vgname_and_id(struct lvmcache_info *info,
1389 const char *vgname, const char *vgid,
1390 uint32_t vgstatus, const char *creation_host)
1391 {
1392 if (!vgname && !info->vginfo) {
1393 log_error(INTERNAL_ERROR "NULL vgname handed to cache");
1394 /* FIXME Remove this */
1395 vgname = info->fmt->orphan_vg_name;
1396 vgid = vgname;
1397 }
1398
1399 /* When using lvmetad, the PV could not have become orphaned. */
1400 if (lvmetad_active() && is_orphan_vg(vgname) && info->vginfo)
1401 return 1;
1402
1403 /* If PV without mdas is already in a real VG, don't make it orphan */
1404 if (is_orphan_vg(vgname) && info->vginfo &&
1405 mdas_empty_or_ignored(&info->mdas) &&
1406 !is_orphan_vg(info->vginfo->vgname) && critical_section())
1407 return 1;
1408
1409 /* If moving PV from orphan to real VG, always mark it valid */
1410 if (!is_orphan_vg(vgname))
1411 info->status &= ~CACHE_INVALID;
1412
1413 if (!_lvmcache_update_vgname(info, vgname, vgid, vgstatus,
1414 creation_host, info->fmt) ||
1415 !_lvmcache_update_vgid(info, info->vginfo, vgid) ||
1416 !_lvmcache_update_vgstatus(info, vgstatus, creation_host))
1417 return_0;
1418
1419 return 1;
1420 }
1421
1422 int lvmcache_update_vg(struct volume_group *vg, unsigned precommitted)
1423 {
1424 struct pv_list *pvl;
1425 struct lvmcache_info *info;
1426 char pvid_s[ID_LEN + 1] __attribute__((aligned(8)));
1427
1428 pvid_s[sizeof(pvid_s) - 1] = '\0';
1429
1430 dm_list_iterate_items(pvl, &vg->pvs) {
1431 strncpy(pvid_s, (char *) &pvl->pv->id, sizeof(pvid_s) - 1);
1432 /* FIXME Could pvl->pv->dev->pvid ever be different? */
1433 if ((info = lvmcache_info_from_pvid(pvid_s, 0)) &&
1434 !lvmcache_update_vgname_and_id(info, vg->name,
1435 (char *) &vg->id,
1436 vg->status, NULL))
1437 return_0;
1438 }
1439
1440 /* store text representation of vg to cache */
1441 if (vg->cmd->current_settings.cache_vgmetadata)
1442 _store_metadata(vg, precommitted);
1443
1444 return 1;
1445 }
1446
/*
 * Add or update the cache entry for one device carrying PV label pvid.
 *
 * If no entry exists for the pvid, a new info+label pair is created.
 * If an entry exists on a different device, the duplicate is resolved:
 * an MD/subsystem component or a device-mapper device is preferred over
 * a plain device, and the entry switches to the preferred device.
 *
 * Returns the cache entry, or NULL on error or when the new device is
 * rejected as the less-preferred duplicate.
 */
struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
				   struct device *dev,
				   const char *vgname, const char *vgid,
				   uint32_t vgstatus)
{
	struct label *label;
	struct lvmcache_info *existing, *info;
	char pvid_s[ID_LEN + 1] __attribute__((aligned(8)));

	if (!_vgname_hash && !lvmcache_init()) {
		log_error("Internal cache initialisation failed");
		return NULL;
	}

	/* NUL-terminated working copy of the (possibly unterminated) pvid. */
	strncpy(pvid_s, pvid, sizeof(pvid_s) - 1);
	pvid_s[sizeof(pvid_s) - 1] = '\0';

	if (!(existing = lvmcache_info_from_pvid(pvid_s, 0)) &&
	    !(existing = lvmcache_info_from_pvid(dev->pvid, 0))) {
		/* First sighting of this PV: build fresh info + label. */
		if (!(label = label_create(labeller)))
			return_NULL;
		if (!(info = dm_zalloc(sizeof(*info)))) {
			log_error("lvmcache_info allocation failed");
			label_destroy(label);
			return NULL;
		}

		label->info = info;
		info->label = label;
		dm_list_init(&info->list);
		info->dev = dev;

		/* Lists come zeroed from dm_zalloc; put them in list form. */
		lvmcache_del_mdas(info);
		lvmcache_del_das(info);
	} else {
		if (existing->dev != dev) {
			/* Is the existing entry a duplicate pvid e.g. md ? */
			if (dev_subsystem_part_major(existing->dev) &&
			    !dev_subsystem_part_major(dev)) {
				log_very_verbose("Ignoring duplicate PV %s on "
						 "%s - using %s %s",
						 pvid, dev_name(dev),
						 dev_subsystem_name(existing->dev),
						 dev_name(existing->dev));
				return NULL;
			} else if (dm_is_dm_major(MAJOR(existing->dev->dev)) &&
				   !dm_is_dm_major(MAJOR(dev->dev))) {
				log_very_verbose("Ignoring duplicate PV %s on "
						 "%s - using dm %s",
						 pvid, dev_name(dev),
						 dev_name(existing->dev));
				return NULL;
			} else if (!dev_subsystem_part_major(existing->dev) &&
				   dev_subsystem_part_major(dev))
				log_very_verbose("Duplicate PV %s on %s - "
						 "using %s %s", pvid,
						 dev_name(existing->dev),
						 dev_subsystem_name(existing->dev),
						 dev_name(dev));
			else if (!dm_is_dm_major(MAJOR(existing->dev->dev)) &&
				 dm_is_dm_major(MAJOR(dev->dev)))
				log_very_verbose("Duplicate PV %s on %s - "
						 "using dm %s", pvid,
						 dev_name(existing->dev),
						 dev_name(dev));
			/* FIXME If both dm, check dependencies */
			//else if (dm_is_dm_major(MAJOR(existing->dev->dev)) &&
			//dm_is_dm_major(MAJOR(dev->dev)))
			//
			else if (!strcmp(pvid_s, existing->dev->pvid))
				log_error("Found duplicate PV %s: using %s not "
					  "%s", pvid, dev_name(dev),
					  dev_name(existing->dev));
		}
		if (strcmp(pvid_s, existing->dev->pvid))
			log_debug("Updating pvid cache to %s (%s) from %s (%s)",
				  pvid_s, dev_name(dev),
				  existing->dev->pvid, dev_name(existing->dev));
		/* Switch over to new preferred device */
		existing->dev = dev;
		info = existing;
		/* Has labeller changed? */
		if (info->label->labeller != labeller) {
			label_destroy(info->label);
			if (!(info->label = label_create(labeller)))
				/* FIXME leaves info without label! */
				return_NULL;
			info->label->info = info;
		}
		label = info->label;
	}

	/* (Re)stamp entry with discovering format; revalidated later. */
	info->fmt = (const struct format_type *) labeller->private;
	info->status |= CACHE_INVALID;

	if (!_lvmcache_update_pvid(info, pvid_s)) {
		/* Only tear down what this call itself created. */
		if (!existing) {
			dm_free(info);
			label_destroy(label);
		}
		return NULL;
	}

	if (!lvmcache_update_vgname_and_id(info, vgname, vgid, vgstatus, NULL)) {
		if (!existing) {
			dm_hash_remove(_pvid_hash, pvid_s);
			strcpy(info->dev->pvid, "");
			dm_free(info);
			label_destroy(label);
		}
		return NULL;
	}

	return info;
}
1562
1563 static void _lvmcache_destroy_entry(struct lvmcache_info *info)
1564 {
1565 _vginfo_detach_info(info);
1566 strcpy(info->dev->pvid, "");
1567 label_destroy(info->label);
1568 dm_free(info);
1569 }
1570
1571 static void _lvmcache_destroy_vgnamelist(struct lvmcache_vginfo *vginfo)
1572 {
1573 struct lvmcache_vginfo *next;
1574
1575 do {
1576 next = vginfo->next;
1577 if (!_free_vginfo(vginfo))
1578 stack;
1579 } while ((vginfo = next));
1580 }
1581
1582 static void _lvmcache_destroy_lockname(struct dm_hash_node *n)
1583 {
1584 char *vgname;
1585
1586 if (!dm_hash_get_data(_lock_hash, n))
1587 return;
1588
1589 vgname = dm_hash_get_key(_lock_hash, n);
1590
1591 if (!strcmp(vgname, VG_GLOBAL))
1592 _vg_global_lock_held = 1;
1593 else
1594 log_error(INTERNAL_ERROR "Volume Group %s was not unlocked",
1595 dm_hash_get_key(_lock_hash, n));
1596 }
1597
1598 void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans)
1599 {
1600 struct dm_hash_node *n;
1601 log_verbose("Wiping internal VG cache");
1602
1603 _has_scanned = 0;
1604
1605 if (_vgid_hash) {
1606 dm_hash_destroy(_vgid_hash);
1607 _vgid_hash = NULL;
1608 }
1609
1610 if (_pvid_hash) {
1611 dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_entry);
1612 dm_hash_destroy(_pvid_hash);
1613 _pvid_hash = NULL;
1614 }
1615
1616 if (_vgname_hash) {
1617 dm_hash_iter(_vgname_hash,
1618 (dm_hash_iterate_fn) _lvmcache_destroy_vgnamelist);
1619 dm_hash_destroy(_vgname_hash);
1620 _vgname_hash = NULL;
1621 }
1622
1623 if (_lock_hash) {
1624 dm_hash_iterate(n, _lock_hash)
1625 _lvmcache_destroy_lockname(n);
1626 dm_hash_destroy(_lock_hash);
1627 _lock_hash = NULL;
1628 }
1629
1630 if (!dm_list_empty(&_vginfos))
1631 log_error(INTERNAL_ERROR "_vginfos list should be empty");
1632 dm_list_init(&_vginfos);
1633
1634 if (retain_orphans)
1635 if (!init_lvmcache_orphans(cmd))
1636 stack;
1637 }
1638
1639 int lvmcache_pvid_is_locked(const char *pvid) {
1640 struct lvmcache_info *info;
1641 info = lvmcache_info_from_pvid(pvid, 0);
1642 if (!info || !info->vginfo)
1643 return 0;
1644
1645 return lvmcache_vgname_is_locked(info->vginfo->vgname);
1646 }
1647
1648 int lvmcache_fid_add_mdas(struct lvmcache_info *info, struct format_instance *fid,
1649 const char *id, int id_len)
1650 {
1651 return fid_add_mdas(fid, &info->mdas, id, id_len);
1652 }
1653
1654 int lvmcache_fid_add_mdas_pv(struct lvmcache_info *info, struct format_instance *fid)
1655 {
1656 return lvmcache_fid_add_mdas(info, fid, info->dev->pvid, ID_LEN);
1657 }
1658
1659 int lvmcache_fid_add_mdas_vg(struct lvmcache_vginfo *vginfo, struct format_instance *fid)
1660 {
1661 struct lvmcache_info *info;
1662 dm_list_iterate_items(info, &vginfo->infos) {
1663 if (!lvmcache_fid_add_mdas_pv(info, fid))
1664 return_0;
1665 }
1666 return 1;
1667 }
1668
1669 static int _get_pv_if_in_vg(struct lvmcache_info *info,
1670 struct physical_volume *pv)
1671 {
1672 char vgname[NAME_LEN + 1];
1673 char vgid[ID_LEN + 1];
1674
1675 if (info->vginfo && info->vginfo->vgname &&
1676 !is_orphan_vg(info->vginfo->vgname)) {
1677 /*
1678 * get_pv_from_vg_by_id() may call
1679 * lvmcache_label_scan() and drop cached
1680 * vginfo so make a local copy of string.
1681 */
1682 strcpy(vgname, info->vginfo->vgname);
1683 memcpy(vgid, info->vginfo->vgid, sizeof(vgid));
1684
1685 if (get_pv_from_vg_by_id(info->fmt, vgname, vgid,
1686 info->dev->pvid, pv))
1687 return 1;
1688 }
1689
1690 return 0;
1691 }
1692
1693 int lvmcache_populate_pv_fields(struct lvmcache_info *info,
1694 struct physical_volume *pv,
1695 int scan_label_only)
1696 {
1697 struct data_area_list *da;
1698
1699 /* Have we already cached vgname? */
1700 if (!scan_label_only && _get_pv_if_in_vg(info, pv))
1701 return 1;
1702
1703 /* Perform full scan (just the first time) and try again */
1704 if (!scan_label_only && !critical_section() && !full_scan_done()) {
1705 lvmcache_label_scan(info->fmt->cmd, 2);
1706
1707 if (_get_pv_if_in_vg(info, pv))
1708 return 1;
1709 }
1710
1711 /* Orphan */
1712 pv->dev = info->dev;
1713 pv->fmt = info->fmt;
1714 pv->size = info->device_size >> SECTOR_SHIFT;
1715 pv->vg_name = FMT_TEXT_ORPHAN_VG_NAME;
1716 memcpy(&pv->id, &info->dev->pvid, sizeof(pv->id));
1717
1718 /* Currently only support exactly one data area */
1719 if (dm_list_size(&info->das) != 1) {
1720 log_error("Must be exactly one data area (found %d) on PV %s",
1721 dm_list_size(&info->das), dev_name(info->dev));
1722 return 0;
1723 }
1724
1725 dm_list_iterate_items(da, &info->das)
1726 pv->pe_start = da->disk_locn.offset >> SECTOR_SHIFT;
1727
1728 return 1;
1729 }
1730
1731 int lvmcache_check_format(struct lvmcache_info *info, const struct format_type *fmt)
1732 {
1733 if (info->fmt != fmt) {
1734 log_error("PV %s is a different format (seqno %s)",
1735 dev_name(info->dev), info->fmt->name);
1736 return 0;
1737 }
1738 return 1;
1739 }
1740
1741 void lvmcache_del_mdas(struct lvmcache_info *info)
1742 {
1743 if (info->mdas.n)
1744 del_mdas(&info->mdas);
1745 dm_list_init(&info->mdas);
1746 }
1747
1748 void lvmcache_del_das(struct lvmcache_info *info)
1749 {
1750 if (info->das.n)
1751 del_das(&info->das);
1752 dm_list_init(&info->das);
1753 }
1754
1755 int lvmcache_add_mda(struct lvmcache_info *info, struct device *dev,
1756 uint64_t start, uint64_t size, unsigned ignored)
1757 {
1758 return add_mda(info->fmt, NULL, &info->mdas, dev, start, size, ignored);
1759 }
1760
1761 int lvmcache_add_da(struct lvmcache_info *info, uint64_t start, uint64_t size)
1762 {
1763 return add_da(NULL, &info->das, start, size);
1764 }
1765
1766
1767 void lvmcache_update_pv(struct lvmcache_info *info, struct physical_volume *pv,
1768 const struct format_type *fmt)
1769 {
1770 info->device_size = pv->size << SECTOR_SHIFT;
1771 info->fmt = fmt;
1772 }
1773
1774 int lvmcache_update_das(struct lvmcache_info *info, struct physical_volume *pv)
1775 {
1776 struct data_area_list *da;
1777 if (info->das.n) {
1778 if (!pv->pe_start)
1779 dm_list_iterate_items(da, &info->das)
1780 pv->pe_start = da->disk_locn.offset >> SECTOR_SHIFT;
1781 del_das(&info->das);
1782 } else
1783 dm_list_init(&info->das);
1784
1785 if (!add_da(NULL, &info->das, pv->pe_start << SECTOR_SHIFT, 0 /*pv->size << SECTOR_SHIFT*/))
1786 return_0;
1787
1788 return 1;
1789 }
1790
1791 int lvmcache_foreach_pv(struct lvmcache_vginfo *vginfo,
1792 int (*fun)(struct lvmcache_info *, void *),
1793 void *baton)
1794 {
1795 struct lvmcache_info *info;
1796 dm_list_iterate_items(info, &vginfo->infos) {
1797 if (!fun(info, baton))
1798 return_0;
1799 }
1800
1801 return 1;
1802 }
1803
1804 int lvmcache_foreach_mda(struct lvmcache_info *info,
1805 int (*fun)(struct metadata_area *, void *),
1806 void *baton)
1807 {
1808 struct metadata_area *mda;
1809 dm_list_iterate_items(mda, &info->mdas) {
1810 if (!fun(mda, baton))
1811 return_0;
1812 }
1813
1814 return 1;
1815 }
1816
1817 int lvmcache_mda_count(struct lvmcache_info *info)
1818 {
1819 return dm_list_size(&info->mdas);
1820 }
1821
1822 int lvmcache_foreach_da(struct lvmcache_info *info,
1823 int (*fun)(struct disk_locn *, void *),
1824 void *baton)
1825 {
1826 struct data_area_list *da;
1827 dm_list_iterate_items(da, &info->das) {
1828 if (!fun(&da->disk_locn, baton))
1829 return_0;
1830 }
1831
1832 return 1;
1833 }
1834
1835 /*
1836 * The lifetime of the label returned is tied to the lifetime of the
1837 * lvmcache_info which is the same as lvmcache itself.
1838 */
1839 struct label *lvmcache_get_label(struct lvmcache_info *info) {
1840 return info->label;
1841 }
1842
1843 void lvmcache_make_valid(struct lvmcache_info *info) {
1844 info->status &= ~CACHE_INVALID;
1845 }
1846
1847 uint64_t lvmcache_device_size(struct lvmcache_info *info) {
1848 return info->device_size;
1849 }
1850
1851 void lvmcache_set_device_size(struct lvmcache_info *info, uint64_t size) {
1852 info->device_size = size;
1853 }
1854
1855 struct device *lvmcache_device(struct lvmcache_info *info) {
1856 return info->dev;
1857 }
1858
1859 int lvmcache_is_orphan(struct lvmcache_info *info) {
1860 if (!info->vginfo)
1861 return 1; /* FIXME? */
1862 return is_orphan_vg(info->vginfo->vgname);
1863 }
1864
1865 int lvmcache_vgid_is_cached(const char *vgid) {
1866 struct lvmcache_vginfo *vginfo;
1867
1868 if (lvmetad_active())
1869 return 1;
1870
1871 vginfo = lvmcache_vginfo_from_vgid(vgid);
1872
1873 if (!vginfo || !vginfo->vgname)
1874 return 0;
1875
1876 if (is_orphan_vg(vginfo->vgname))
1877 return 0;
1878
1879 return 1;
1880 }
1881
1882 /*
1883 * Return true iff it is impossible to find out from this info alone whether the
1884 * PV in question is or is not an orphan.
1885 */
1886 int lvmcache_uncertain_ownership(struct lvmcache_info *info) {
1887 return mdas_empty_or_ignored(&info->mdas);
1888 }
1889
1890 int lvmcache_smallest_mda_size(struct lvmcache_info *info)
1891 {
1892 return find_min_mda_size(&info->mdas);
1893 }
1894
1895 const struct format_type *lvmcache_fmt(struct lvmcache_info *info) {
1896 return info->fmt;
1897 }
This page took 0.17473 seconds and 5 git commands to generate.