2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
5 * This file is part of LVM2.
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 #include "toolcontext.h"
19 #include "dev-cache.h"
23 #include "filter-persistent.h"
26 #include "format-text.h"
27 #include "format_pool.h"
/*
 * Flags kept in lvmcache_info.status:
 *  CACHE_INVALID - cached PV label is stale and must be re-read from disk
 *                  (see _rescan_entry / _scan_invalid below).
 *  CACHE_LOCKED  - the owning VG's lock is currently held
 *                  (maintained by _update_cache_info_lock_state).
 */
33 #define CACHE_INVALID 0x00000001
34 #define CACHE_LOCKED 0x00000002
/*
 * Per-PV cache entry: one per physical volume label seen by the scanner.
 * NOTE(review): fragmented listing - further members (e.g. the device,
 * label and status fields referenced elsewhere in this file) and the
 * closing brace are missing from this view.
 */
37 struct lvmcache_info
{
38 struct dm_list list
; /* Join VG members together */
39 struct dm_list mdas
; /* list head for metadata areas */
40 struct dm_list das
; /* list head for data areas */
41 struct lvmcache_vginfo
*vginfo
; /* NULL == unknown */
43 const struct format_type
*fmt
;
45 uint64_t device_size
; /* Bytes */
/*
 * Per-VG cache entry: holds the member PV infos, an optional text copy
 * of the VG metadata plus its parsed config tree, and an optional fully
 * imported struct volume_group for reuse.
 * NOTE(review): fragmented listing - some members (e.g. status and
 * creation_host, referenced later in this file) and the closing brace
 * are missing from this view.
 */
50 struct lvmcache_vginfo
{
51 struct dm_list list
; /* Join these vginfos together */
52 struct dm_list infos
; /* List head for lvmcache_infos */
53 const struct format_type
*fmt
;
54 char *vgname
; /* "" == orphan */
56 char vgid
[ID_LEN
+ 1];
58 struct lvmcache_vginfo
*next
; /* Another VG with same name? */
60 size_t vgmetadata_size
;
61 char *vgmetadata
; /* Copy of VG metadata as format_text string */
62 struct dm_config_tree
*cft
; /* Config tree created from vgmetadata */
63 /* Lifetime is directly tied to vgmetadata */
64 struct volume_group
*cached_vg
;
66 unsigned vg_use_count
; /* Counter of vg reusage */
67 unsigned precommitted
; /* Is vgmetadata live or precommitted? */
/*
 * File-scope cache state: lookup hashes keyed by PV id, VG id and VG
 * name, a hash recording which VG locks are held, the global vginfo
 * list, and scan/lock bookkeeping flags.
 */
70 static struct dm_hash_table
*_pvid_hash
= NULL
;
71 static struct dm_hash_table
*_vgid_hash
= NULL
;
72 static struct dm_hash_table
*_vgname_hash
= NULL
;
73 static struct dm_hash_table
*_lock_hash
= NULL
;
74 static DM_LIST_INIT(_vginfos
);
75 static int _scanning_in_progress
= 0;
76 static int _has_scanned
= 0;
77 static int _vgs_locked
= 0;
78 static int _vg_global_lock_held
= 0; /* Global lock held when cache wiped? */
/*
 * Initialise the cache: the vginfo list and the four hash tables, then
 * re-record the VG_GLOBAL lock if it was held when the cache was wiped.
 * NOTE(review): fragmented listing - braces, error paths and the return
 * value are missing from this view.
 */
80 int lvmcache_init(void)
83 * FIXME add a proper lvmcache_locking_reset() that
84 * resets the cache so no previous locks are locked
88 dm_list_init(&_vginfos
);
90 if (!(_vgname_hash
= dm_hash_create(128)))
93 if (!(_vgid_hash
= dm_hash_create(128)))
96 if (!(_pvid_hash
= dm_hash_create(128)))
99 if (!(_lock_hash
= dm_hash_create(128)))
103 * Reinitialising the cache clears the internal record of
104 * which locks are held. The global lock can be held during
105 * this operation so its state must be restored afterwards.
107 if (_vg_global_lock_held
) {
108 lvmcache_lock_vgname(VG_GLOBAL
, 0);
109 _vg_global_lock_held
= 0;
/*
 * Populate the cache from the lvmetad daemon's PV list, unless lvmetad
 * is inactive or a scan has already filled the cache (_has_scanned).
 * NOTE(review): fragmented listing - error handling body not visible.
 */
117 void lvmcache_seed_infos_from_lvmetad(struct cmd_context
*cmd
)
119 if (!lvmetad_active() || _has_scanned
)
122 if (!lvmetad_pv_list_to_lvmcache(cmd
)) {
130 /* Volume Group metadata cache functions */
/*
 * Drop the cached text metadata for one VG: free the metadata string,
 * destroy the config tree derived from it, and release the cached
 * struct volume_group. No-op if nothing is cached.
 */
131 static void _free_cached_vgmetadata(struct lvmcache_vginfo
*vginfo
)
133 if (!vginfo
|| !vginfo
->vgmetadata
)
136 dm_free(vginfo
->vgmetadata
);
138 vginfo
->vgmetadata
= NULL
;
140 /* Release also cached config tree */
142 dm_config_destroy(vginfo
->cft
);
146 log_debug("Metadata cache: VG %s wiped.", vginfo
->vgname
);
148 release_vg(vginfo
->cached_vg
);
152 * Cache VG metadata against the vginfo with matching vgid.
/*
 * Export the VG to a text buffer and attach it to the matching vginfo,
 * replacing any previous copy unless the new text is byte-identical.
 * 'precommitted' records whether the text is live or pre-commit
 * metadata. NOTE(review): fragmented listing - declarations of 'size'
 * and 'data' and several returns are missing from this view.
 */
154 static void _store_metadata(struct volume_group
*vg
, unsigned precommitted
)
156 char uuid
[64] __attribute__((aligned(8)));
157 struct lvmcache_vginfo
*vginfo
;
161 if (!(vginfo
= lvmcache_vginfo_from_vgid((const char *)&vg
->id
))) {
166 if (!(size
= export_vg_to_buffer(vg
, &data
))) {
168 _free_cached_vgmetadata(vginfo
);
172 /* Avoid reparsing of the same data string */
173 if (vginfo
->vgmetadata
&& vginfo
->vgmetadata_size
== size
&&
174 strcmp(vginfo
->vgmetadata
, data
) == 0)
177 _free_cached_vgmetadata(vginfo
);
178 vginfo
->vgmetadata_size
= size
;
179 vginfo
->vgmetadata
= data
;
182 vginfo
->precommitted
= precommitted
;
184 if (!id_write_format((const struct id
*)vginfo
->vgid
, uuid
, sizeof(uuid
))) {
189 log_debug("Metadata cache: VG %s (%s) stored (%" PRIsize_t
" bytes%s).",
190 vginfo
->vgname
, uuid
, size
,
191 precommitted
? ", precommitted" : "");
/*
 * Reconcile one PV info's CACHE_LOCKED flag with the current lock state
 * of its VG.  A lock-state transition invalidates the cached label
 * (CACHE_INVALID) and clears *cached_vgmetadata_valid, except while the
 * exclusive VG_GLOBAL lock is held (i.e. during a scan).
 * NOTE(review): 'locked' parameter line is missing from this view.
 */
194 static void _update_cache_info_lock_state(struct lvmcache_info
*info
,
196 int *cached_vgmetadata_valid
)
198 int was_locked
= (info
->status
& CACHE_LOCKED
) ? 1 : 0;
201 * Cache becomes invalid whenever lock state changes unless
202 * exclusive VG_GLOBAL is held (i.e. while scanning).
204 if (!lvmcache_vgname_is_locked(VG_GLOBAL
) && (was_locked
!= locked
)) {
205 info
->status
|= CACHE_INVALID
;
206 *cached_vgmetadata_valid
= 0;
210 info
->status
|= CACHE_LOCKED
;
212 info
->status
&= ~CACHE_LOCKED
;
/*
 * Apply _update_cache_info_lock_state() to every PV info in the VG;
 * if any transition invalidated the cache, drop the VG's cached text
 * metadata as well.
 * NOTE(review): 'locked' parameter line is missing from this view.
 */
215 static void _update_cache_vginfo_lock_state(struct lvmcache_vginfo
*vginfo
,
218 struct lvmcache_info
*info
;
219 int cached_vgmetadata_valid
= 1;
221 dm_list_iterate_items(info
, &vginfo
->infos
)
222 _update_cache_info_lock_state(info
, locked
,
223 &cached_vgmetadata_valid
);
225 if (!cached_vgmetadata_valid
)
226 _free_cached_vgmetadata(vginfo
);
/*
 * Look up the vginfo by name and propagate the new lock state to it;
 * silently does nothing for VGs not present in the cache.
 */
229 static void _update_cache_lock_state(const char *vgname
, int locked
)
231 struct lvmcache_vginfo
*vginfo
;
233 if (!(vginfo
= lvmcache_vginfo_from_vgname(vgname
, NULL
)))
236 _update_cache_vginfo_lock_state(vginfo
, locked
)
;
/*
 * Invalidate a VG's cached state: mark member PV labels CACHE_INVALID
 * and free the cached text metadata.  Cached precommitted metadata
 * implies the labels were already invalidated when it was stored, so
 * they are not invalidated twice.  With drop_precommitted set, the
 * precommitted flag itself is also cleared.
 */
239 static void _drop_metadata(const char *vgname
, int drop_precommitted
)
241 struct lvmcache_vginfo
*vginfo
;
242 struct lvmcache_info
*info
;
244 if (!(vginfo
= lvmcache_vginfo_from_vgname(vgname
, NULL
)))
248 * Invalidate cached PV labels.
249 * If cached precommitted metadata exists that means we
250 * already invalidated the PV labels (before caching it)
251 * and we must not do it again.
253 if (!drop_precommitted
&& vginfo
->precommitted
&& !vginfo
->vgmetadata
)
254 log_error(INTERNAL_ERROR
"metadata commit (or revert) missing before "
255 "dropping metadata from cache.");
257 if (drop_precommitted
|| !vginfo
->precommitted
)
258 dm_list_iterate_items(info
, &vginfo
->infos
)
259 info
->status
|= CACHE_INVALID
;
261 _free_cached_vgmetadata(vginfo
);
264 if (drop_precommitted
)
265 vginfo
->precommitted
= 0;
269 * Remote node uses this to upgrade precommited metadata to commited state
270 * when receives vg_commit notification.
271 * (Note that devices can be suspended here, if so, precommited metadata are already read.)
/* Clear the precommitted flag on the named VG's cached metadata. */
273 void lvmcache_commit_metadata(const char *vgname
)
275 struct lvmcache_vginfo
*vginfo
;
277 if (!(vginfo
= lvmcache_vginfo_from_vgname(vgname
, NULL
)))
280 if (vginfo
->precommitted
) {
281 log_debug("Precommitted metadata cache: VG %s upgraded to committed.",
283 vginfo
->precommitted
= 0;
/*
 * Public wrapper around _drop_metadata().  VG_ORPHANS fans out to the
 * per-format orphan VG names and flags that a full rescan may now be
 * needed; other names are dropped directly unless the exclusive
 * VG_GLOBAL lock is held.
 */
287 void lvmcache_drop_metadata(const char *vgname
, int drop_precommitted
)
289 /* For VG_ORPHANS, we need to invalidate all labels on orphan PVs. */
290 if (!strcmp(vgname
, VG_ORPHANS
)) {
291 _drop_metadata(FMT_TEXT_ORPHAN_VG_NAME
, 0);
292 _drop_metadata(FMT_LVM1_ORPHAN_VG_NAME
, 0);
293 _drop_metadata(FMT_POOL_ORPHAN_VG_NAME
, 0);
295 /* Indicate that PVs could now be missing from the cache */
296 init_full_scan_done(0);
297 } else if (!lvmcache_vgname_is_locked(VG_GLOBAL
))
298 _drop_metadata(vgname
, drop_precommitted
);
302 * Ensure vgname2 comes after vgname1 alphabetically.
303 * Orphan locks come last.
304 * VG_GLOBAL comes first.
/*
 * Lock-ordering predicate used by lvmcache_verify_lock_order().
 * NOTE(review): fragmented listing - the return statements following
 * each test are missing from this view.
 */
306 static int _vgname_order_correct(const char *vgname1
, const char *vgname2
)
308 if (is_global_vg(vgname1
))
311 if (is_global_vg(vgname2
))
314 if (is_orphan_vg(vgname1
))
317 if (is_orphan_vg(vgname2
))
320 if (strcmp(vgname1
, vgname2
) < 0)
327 * Ensure VG locks are acquired in alphabetical order.
/*
 * Walk every lock currently recorded in _lock_hash and report EDEADLK
 * (as an internal error) if acquiring 'vgname' now would violate the
 * ordering defined by _vgname_order_correct().
 * NOTE(review): fragmented listing - vgname2 declaration and the
 * return statements are missing from this view.
 */
329 int lvmcache_verify_lock_order(const char *vgname
)
331 struct dm_hash_node
*n
;
337 dm_hash_iterate(n
, _lock_hash
) {
338 if (!dm_hash_get_data(_lock_hash
, n
))
341 if (!(vgname2
= dm_hash_get_key(_lock_hash
, n
))) {
342 log_error(INTERNAL_ERROR
"VG lock %s hits NULL.",
347 if (!_vgname_order_correct(vgname2
, vgname
)) {
348 log_errno(EDEADLK
, INTERNAL_ERROR
"VG lock %s must "
349 "be requested before %s, not after.",
/*
 * Record that the named VG's lock is now held: insert it into
 * _lock_hash (complaining about nested locking) and push the new lock
 * state into the cached infos.  Non-global locks bump _vgs_locked
 * (counter increment not visible in this fragmented view).
 */
358 void lvmcache_lock_vgname(const char *vgname
, int read_only
__attribute__((unused
)))
360 if (!_lock_hash
&& !lvmcache_init()) {
361 log_error("Internal cache initialisation failed");
365 if (dm_hash_lookup(_lock_hash
, vgname
))
366 log_error(INTERNAL_ERROR
"Nested locking attempted on VG %s.",
369 if (!dm_hash_insert(_lock_hash
, vgname
, (void *) 1))
370 log_error("Cache locking failure for %s", vgname
);
372 _update_cache_lock_state(vgname
, 1);
374 if (strcmp(vgname
, VG_GLOBAL
))
/*
 * Return 1 if the named VG's lock is recorded as held.  All orphan VG
 * names are folded onto the single VG_ORPHANS lock entry.
 */
378 int lvmcache_vgname_is_locked(const char *vgname
)
383 return dm_hash_lookup(_lock_hash
, is_orphan_vg(vgname
) ? VG_ORPHANS
: vgname
) ? 1 : 0;
/*
 * Record that the named VG's lock was released: push the unlocked state
 * into the cache and remove the _lock_hash entry.  Releasing the last
 * non-global lock decrements _vgs_locked to zero (the action taken then
 * is missing from this fragmented view).
 */
386 void lvmcache_unlock_vgname(const char *vgname
)
388 if (!dm_hash_lookup(_lock_hash
, vgname
))
389 log_error(INTERNAL_ERROR
"Attempt to unlock unlocked VG %s.",
392 _update_cache_lock_state(vgname
, 0);
394 dm_hash_remove(_lock_hash
, vgname
);
396 /* FIXME Do this per-VG */
397 if (strcmp(vgname
, VG_GLOBAL
) && !--_vgs_locked
)
/* Accessor for _vgs_locked; body not visible in this fragmented view. */
401 int lvmcache_vgs_locked(void)
/*
 * Link a PV info into a vginfo: set the back-pointer and add it to the
 * vginfo's member list.
 */
406 static void _vginfo_attach_info(struct lvmcache_vginfo
*vginfo
,
407 struct lvmcache_info
*info
)
412 info
->vginfo
= vginfo
;
413 dm_list_add(&vginfo
->infos
, &info
->list
);
/*
 * Unlink a PV info from its vginfo's member list, leaving the list
 * node reinitialised so a repeat detach is harmless.
 */
416 static void _vginfo_detach_info(struct lvmcache_info
*info
)
418 if (!dm_list_empty(&info
->list
)) {
419 dm_list_del(&info
->list
);
420 dm_list_init(&info
->list
);
426 /* If vgid supplied, require a match. */
/*
 * Look up a vginfo by VG name; with a NULL vgname delegate to the vgid
 * lookup.  Same-named VGs are chained via vginfo->next, so when a vgid
 * is supplied the chain is walked until the vgid matches.
 */
427 struct lvmcache_vginfo
*lvmcache_vginfo_from_vgname(const char *vgname
, const char *vgid
)
429 struct lvmcache_vginfo
*vginfo
;
432 return lvmcache_vginfo_from_vgid(vgid
);
437 if (!(vginfo
= dm_hash_lookup(_vgname_hash
, vgname
)))
442 if (!strncmp(vgid
, vginfo
->vgid
, ID_LEN
))
444 while ((vginfo
= vginfo
->next
));
/*
 * Return the metadata format of the named VG.  Falls back to asking
 * lvmetad when the VG is unknown locally.  When revalidate_labels is
 * set, every member PV's label is re-read (devices are first copied to
 * a private list because label_read may alter vginfo membership), then
 * the vginfo is looked up again; a changed vgid means the caller must
 * rescan.  NOTE(review): fragmented listing - the 'devs'/'label' locals
 * and several returns are missing from this view.
 */
449 const struct format_type
*lvmcache_fmt_from_vgname(struct cmd_context
*cmd
,
450 const char *vgname
, const char *vgid
,
451 unsigned revalidate_labels
)
453 struct lvmcache_vginfo
*vginfo
;
454 struct lvmcache_info
*info
;
456 struct dm_list
*devh
, *tmp
;
458 struct device_list
*devl
;
459 struct volume_group
*vg
;
460 const struct format_type
*fmt
;
461 char vgid_found
[ID_LEN
+ 1] __attribute__((aligned(8)));
463 if (!(vginfo
= lvmcache_vginfo_from_vgname(vgname
, vgid
))) {
464 if (!lvmetad_active())
465 return NULL
; /* too bad */
466 /* If we don't have the info but we have lvmetad, we can ask
467 * there before failing. */
468 if ((vg
= lvmetad_vg_lookup(cmd
, vgname
, vgid
))) {
477 * If this function is called repeatedly, only the first one needs to revalidate.
479 if (!revalidate_labels
)
483 * This function is normally called before reading metadata so
484 * we check cached labels here. Unfortunately vginfo is volatile.
487 dm_list_iterate_items(info
, &vginfo
->infos
) {
488 if (!(devl
= dm_malloc(sizeof(*devl
)))) {
489 log_error("device_list element allocation failed");
492 devl
->dev
= info
->dev
;
493 dm_list_add(&devs
, &devl
->list
);
496 memcpy(vgid_found
, vginfo
->vgid
, sizeof(vgid_found
));
498 dm_list_iterate_safe(devh
, tmp
, &devs
) {
499 devl
= dm_list_item(devh
, struct device_list
);
500 (void) label_read(devl
->dev
, &label
, UINT64_C(0));
501 dm_list_del(&devl
->list
);
505 /* If vginfo changed, caller needs to rescan */
506 if (!(vginfo
= lvmcache_vginfo_from_vgname(vgname
, vgid_found
)) ||
507 strncmp(vginfo
->vgid
, vgid_found
, ID_LEN
))
/*
 * Look up a vginfo by VG id.  The incoming vgid need not be
 * NUL-terminated, so it is copied into a terminated local buffer
 * before the hash lookup.
 */
514 struct lvmcache_vginfo
*lvmcache_vginfo_from_vgid(const char *vgid
)
516 struct lvmcache_vginfo
*vginfo
;
517 char id
[ID_LEN
+ 1] __attribute__((aligned(8)));
519 if (!_vgid_hash
|| !vgid
)
522 /* vgid not necessarily NULL-terminated */
523 strncpy(&id
[0], vgid
, ID_LEN
);
526 if (!(vginfo
= dm_hash_lookup(_vgid_hash
, id
)))
/*
 * Return the VG name for a vgid, duplicated into the supplied pool
 * (NULL when the vgid is unknown; strdup of NULL path not visible in
 * this fragmented view).
 */
532 const char *lvmcache_vgname_from_vgid(struct dm_pool
*mem
, const char *vgid
)
534 struct lvmcache_vginfo
*vginfo
;
535 const char *vgname
= NULL
;
537 if ((vginfo
= lvmcache_vginfo_from_vgid(vgid
)))
538 vgname
= vginfo
->vgname
;
541 return dm_pool_strdup(mem
, vgname
);
/*
 * A PV info is valid unless flagged CACHE_INVALID; an unlocked VG's
 * cached value is also accepted (remote cluster nodes may read while
 * the controlling node holds the lock).
 * NOTE(review): fragmented listing - returns are missing from view.
 */
546 static int _info_is_valid(struct lvmcache_info
*info
)
548 if (info
->status
& CACHE_INVALID
)
552 * The caller must hold the VG lock to manipulate metadata.
553 * In a cluster, remote nodes sometimes read metadata in the
554 * knowledge that the controlling node is holding the lock.
555 * So if the VG appears to be unlocked here, it should be safe
556 * to use the cached value.
558 if (info
->vginfo
&& !lvmcache_vgname_is_locked(info
->vginfo
->vgname
))
561 if (!(info
->status
& CACHE_LOCKED
))
/* A vginfo is fully valid only if every member info is valid. */
567 static int _vginfo_is_valid(struct lvmcache_vginfo
*vginfo
)
569 struct lvmcache_info
*info
;
571 /* Invalid if any info is invalid */
572 dm_list_iterate_items(info
, &vginfo
->infos
)
573 if (!_info_is_valid(info
))
579 /* vginfo is invalid if it does not contain at least one valid info */
580 static int _vginfo_is_invalid(struct lvmcache_vginfo
*vginfo
)
582 struct lvmcache_info
*info
;
584 dm_list_iterate_items(info
, &vginfo
->infos
)
585 if (_info_is_valid(info
))
592 * If valid_only is set, data will only be returned if the cached data is
593 * known still to be valid.
/*
 * Look up a PV info by pvid (copied into a NUL-terminated buffer first,
 * like the vgid lookup).  valid_only filters out CACHE_INVALID entries.
 */
595 struct lvmcache_info
*lvmcache_info_from_pvid(const char *pvid
, int valid_only
)
597 struct lvmcache_info
*info
;
598 char id
[ID_LEN
+ 1] __attribute__((aligned(8)));
600 if (!_pvid_hash
|| !pvid
)
603 strncpy(&id
[0], pvid
, ID_LEN
);
606 if (!(info
= dm_hash_lookup(_pvid_hash
, id
)))
609 if (valid_only
&& !_info_is_valid(info
))
/* Convenience accessor: the VG name a PV info currently belongs to. */
615 const char *lvmcache_vgname_from_info(struct lvmcache_info
*info
)
618 return info
->vginfo
->vgname
;
/*
 * Resolve a pvid to its VG name: ensure the device is known (scanning
 * if necessary via lvmcache_device_from_pvid), then duplicate the
 * cached VG name into the command's memory pool.
 * NOTE(review): fragmented listing - 'vgname' declaration and returns
 * are missing from this view.
 */
622 char *lvmcache_vgname_from_pvid(struct cmd_context
*cmd
, const char *pvid
)
624 struct lvmcache_info
*info
;
627 if (!lvmcache_device_from_pvid(cmd
, (const struct id
*)pvid
, NULL
, NULL
)) {
628 log_error("Couldn't find device with uuid %s.", pvid
);
632 info
= lvmcache_info_from_pvid(pvid
, 0);
636 if (!(vgname
= dm_pool_strdup(cmd
->mem
, info
->vginfo
->vgname
))) {
637 log_errno(ENOMEM
, "vgname allocation failed");
/*
 * Hash-iterator callback: re-read the label of any PV whose cached
 * entry is flagged CACHE_INVALID.
 */
643 static void _rescan_entry(struct lvmcache_info
*info
)
647 if (info
->status
& CACHE_INVALID
)
648 (void) label_read(info
->dev
, &label
, UINT64_C(0));
/* Re-read every invalidated PV label in the pvid hash. */
651 static int _scan_invalid(void)
653 dm_hash_iter(_pvid_hash
, (dm_hash_iterate_fn
) _rescan_entry
);
/*
 * Scan device labels to (re)populate the cache.  Skipped entirely when
 * lvmetad serves the cache; guarded against recursion via
 * _scanning_in_progress.  full_scan==0 may reuse a previous scan,
 * full_scan==2 additionally refreshes filters, ignores filter hints in
 * dev_iter_create() and can dump the persistent filter afterwards.
 * Also runs format-specific scans for independent metadata areas.
 * NOTE(review): fragmented listing - 'dev'/'label' locals, the
 * _scan_invalid()/_has_scanned updates and return paths are missing
 * from this view.
 */
658 int lvmcache_label_scan(struct cmd_context
*cmd
, int full_scan
)
661 struct dev_iter
*iter
;
663 struct format_type
*fmt
;
667 if (lvmetad_active())
670 /* Avoid recursion when a PVID can't be found! */
671 if (_scanning_in_progress
)
674 _scanning_in_progress
= 1;
676 if (!_vgname_hash
&& !lvmcache_init()) {
677 log_error("Internal cache initialisation failed");
681 if (_has_scanned
&& !full_scan
) {
686 if (full_scan
== 2 && (cmd
->filter
&& !cmd
->filter
->use_count
) && !refresh_filters(cmd
))
689 if (!cmd
->filter
|| !(iter
= dev_iter_create(cmd
->filter
, (full_scan
== 2) ? 1 : 0))) {
690 log_error("dev_iter creation failed");
694 while ((dev
= dev_iter_get(iter
)))
695 (void) label_read(dev
, &label
, UINT64_C(0));
697 dev_iter_destroy(iter
);
701 /* Perform any format-specific scanning e.g. text files */
702 if (cmd
->independent_metadata_areas
)
703 dm_list_iterate_items(fmt
, &cmd
->formats
)
704 if (fmt
->ops
->scan
&& !fmt
->ops
->scan(fmt
, NULL
))
708 * If we are a long-lived process, write out the updated persistent
709 * device cache for the benefit of short-lived processes.
711 if (full_scan
== 2 && cmd
->is_long_lived
&& cmd
->dump_filter
)
712 persistent_filter_dump(cmd
->filter
, 0);
717 _scanning_in_progress
= 0;
/*
 * Return a struct volume_group for vgname/vgid, preferring cached data.
 * With lvmetad active (and live metadata requested) the locally cached
 * VG is served if present, otherwise lvmetad is queried.  Without
 * lvmetad: requires cached, valid text metadata; refuses to serve when
 * the precommitted/live request does not match what is cached (unless
 * devices are suspended, in which case precommitted is assumed
 * preloaded and is returned as live); reuses the cached VG struct or
 * imports a fresh one from the cached config tree and caches it.
 * The pool is locked for corruption detection and vg_use_count tracks
 * reuse.  NOTE(review): fragmented listing - several returns, the cft
 * caching branch and error cleanup are missing from this view.
 */
722 struct volume_group
*lvmcache_get_vg(struct cmd_context
*cmd
, const char *vgname
,
723 const char *vgid
, unsigned precommitted
)
725 struct lvmcache_vginfo
*vginfo
;
726 struct volume_group
*vg
= NULL
;
727 struct format_instance
*fid
;
728 struct format_instance_ctx fic
;
731 * We currently do not store precommitted metadata in lvmetad at
732 * all. This means that any request for precommitted metadata is served
733 * using the classic scanning mechanics, and read from disk or from
736 if (lvmetad_active() && !precommitted
) {
737 /* Still serve the locally cached VG if available */
738 if (vgid
&& (vginfo
= lvmcache_vginfo_from_vgid(vgid
)) &&
739 vginfo
->vgmetadata
&& (vg
= vginfo
->cached_vg
))
741 return lvmetad_vg_lookup(cmd
, vgname
, vgid
);
744 if (!vgid
|| !(vginfo
= lvmcache_vginfo_from_vgid(vgid
)) || !vginfo
->vgmetadata
)
747 if (!_vginfo_is_valid(vginfo
))
751 * Don't return cached data if either:
752 * (i) precommitted metadata is requested but we don't have it cached
753 * - caller should read it off disk;
754 * (ii) live metadata is requested but we have precommitted metadata cached
755 * and no devices are suspended so caller may read it off disk.
757 * If live metadata is requested but we have precommitted metadata cached
758 * and devices are suspended, we assume this precommitted metadata has
759 * already been preloaded and committed so it's OK to return it as live.
760 * Note that we do not clear the PRECOMMITTED flag.
762 if ((precommitted
&& !vginfo
->precommitted
) ||
763 (!precommitted
&& vginfo
->precommitted
&& !critical_section()))
766 /* Use already-cached VG struct when available */
767 if ((vg
= vginfo
->cached_vg
))
770 fic
.type
= FMT_INSTANCE_MDAS
| FMT_INSTANCE_AUX_MDAS
;
771 fic
.context
.vg_ref
.vg_name
= vginfo
->vgname
;
772 fic
.context
.vg_ref
.vg_id
= vgid
;
773 if (!(fid
= vginfo
->fmt
->ops
->create_instance(vginfo
->fmt
, &fic
)))
776 /* Build config tree from vgmetadata, if not yet cached */
779 dm_config_from_string(vginfo
->vgmetadata
)))
782 if (!(vg
= import_vg_from_config_tree(vginfo
->cft
, fid
)))
785 /* Cache VG struct for reuse */
786 vginfo
->cached_vg
= vg
;
788 vginfo
->vg_use_count
= 0;
791 if (!dm_pool_lock(vg
->vgmem
, detect_internal_vg_cache_corruption()))
796 vginfo
->vg_use_count
++;
797 log_debug("Using cached %smetadata for VG %s with %u holder(s).",
798 vginfo
->precommitted
? "pre-committed " : "",
799 vginfo
->vgname
, vginfo
->holders
);
804 _free_cached_vgmetadata(vginfo
);
/*
 * Drop one holder reference on the cached VG.  When the last holder is
 * released: optionally CRC-check the pool (only if the VG was reused
 * more than once), detach the cached_vg back-pointer and clear it.
 * NOTE(review): fragmented listing - the 'holders' member and returns
 * are not visible in this view; presumably returns non-zero when the
 * count reaches zero - confirm against the header.
 */
809 int lvmcache_vginfo_holders_dec_and_test_for_zero(struct lvmcache_vginfo
*vginfo
)
811 log_debug("VG %s decrementing %d holder(s) at %p.",
812 vginfo
->cached_vg
->name
, vginfo
->holders
, vginfo
->cached_vg
);
814 if (--vginfo
->holders
)
817 if (vginfo
->vg_use_count
> 1)
818 log_debug("VG %s reused %d times.",
819 vginfo
->cached_vg
->name
, vginfo
->vg_use_count
);
821 /* Debug perform crc check only when it's been used more then once */
822 if (!dm_pool_unlock(vginfo
->cached_vg
->vgmem
,
823 detect_internal_vg_cache_corruption() &&
824 (vginfo
->vg_use_count
> 1)))
827 vginfo
->cached_vg
->vginfo
= NULL
;
828 vginfo
->cached_vg
= NULL
;
/*
 * Build a string list of all cached vgids (after a label scan).
 * include_internal controls whether orphan VGs are listed.
 */
834 struct dm_list
*lvmcache_get_vgids(struct cmd_context
*cmd
,
835 int include_internal
)
837 struct dm_list
*vgids
;
838 struct lvmcache_vginfo
*vginfo
;
840 // TODO plug into lvmetad here automagically?
841 lvmcache_label_scan(cmd
, 0);
843 if (!(vgids
= str_list_create(cmd
->mem
))) {
844 log_error("vgids list allocation failed");
848 dm_list_iterate_items(vginfo
, &_vginfos
) {
849 if (!include_internal
&& is_orphan_vg(vginfo
->vgname
))
852 if (!str_list_add(cmd
->mem
, vgids
,
853 dm_pool_strdup(cmd
->mem
, vginfo
->vgid
))) {
854 log_error("strlist allocation failed");
/*
 * Build a string list of all cached VG names (after a label scan);
 * include_internal controls whether orphan VGs are listed.
 */
862 struct dm_list
*lvmcache_get_vgnames(struct cmd_context
*cmd
,
863 int include_internal
)
865 struct dm_list
*vgnames
;
866 struct lvmcache_vginfo
*vginfo
;
868 lvmcache_label_scan(cmd
, 0);
870 if (!(vgnames
= str_list_create(cmd
->mem
))) {
871 log_errno(ENOMEM
, "vgnames list allocation failed");
875 dm_list_iterate_items(vginfo
, &_vginfos
) {
876 if (!include_internal
&& is_orphan_vg(vginfo
->vgname
))
879 if (!str_list_add(cmd
->mem
, vgnames
,
880 dm_pool_strdup(cmd
->mem
, vginfo
->vgname
))) {
881 log_errno(ENOMEM
, "strlist allocation failed");
/*
 * Build a string list of the pvids belonging to one VG (identified by
 * name and, per the truncated parameter list, vgid).  Returns the
 * (possibly empty) list even when the VG is not in the cache.
 */
889 struct dm_list
*lvmcache_get_pvids(struct cmd_context
*cmd
, const char *vgname
,
892 struct dm_list
*pvids
;
893 struct lvmcache_vginfo
*vginfo
;
894 struct lvmcache_info
*info
;
896 if (!(pvids
= str_list_create(cmd
->mem
))) {
897 log_error("pvids list allocation failed");
901 if (!(vginfo
= lvmcache_vginfo_from_vgname(vgname
, vgid
)))
904 dm_list_iterate_items(info
, &vginfo
->infos
) {
905 if (!str_list_add(cmd
->mem
, pvids
,
906 dm_pool_strdup(cmd
->mem
, info
->dev
->pvid
))) {
907 log_error("strlist allocation failed");
/*
 * Resolve a pvid to its cached device.  With lvmetad the cached info is
 * trusted directly; otherwise the label is re-read from the device and
 * the pvid is confirmed to still match before the device is returned.
 * Optionally reports the label's sector through label_sector.
 * NOTE(review): fragmented listing - 'label' local and returns missing.
 */
915 static struct device
*_device_from_pvid(const struct id
*pvid
,
916 uint64_t *label_sector
)
918 struct lvmcache_info
*info
;
921 if ((info
= lvmcache_info_from_pvid((const char *) pvid
, 0))) {
922 if (lvmetad_active()) {
923 if (info
->label
&& label_sector
)
924 *label_sector
= info
->label
->sector
;
928 if (label_read(info
->dev
, &label
, UINT64_C(0))) {
929 info
= (struct lvmcache_info
*) label
->info
;
930 if (id_equal(pvid
, (struct id
*) &info
->dev
->pvid
)) {
932 *label_sector
= label
->sector
;
/*
 * Public pvid -> device lookup with escalating effort: try the cache,
 * then a normal label scan, then (unless in a critical section or a
 * full scan was already done once) a full device scan ignoring filter
 * hints (full_scan==2).
 */
940 struct device
*lvmcache_device_from_pvid(struct cmd_context
*cmd
, const struct id
*pvid
,
941 unsigned *scan_done_once
, uint64_t *label_sector
)
945 /* Already cached ? */
946 dev
= _device_from_pvid(pvid
, label_sector
);
950 lvmcache_label_scan(cmd
, 0);
953 dev
= _device_from_pvid(pvid
, label_sector
);
957 if (critical_section() || (scan_done_once
&& *scan_done_once
))
960 lvmcache_label_scan(cmd
, 2);
965 dev
= _device_from_pvid(pvid
, label_sector
);
/*
 * Map a device name to its pvid by reading the device's label.
 * NOTE(review): fragmented listing - 'devname' parameter, locals and
 * the final lookup/return are missing from this view.
 */
972 const char *lvmcache_pvid_from_devname(struct cmd_context
*cmd
,
978 if (!(dev
= dev_cache_get(devname
, cmd
->filter
))) {
979 log_error("%s: Couldn't find device. Check your filters?",
984 if (!(label_read(dev
, &label
, UINT64_C(0))))
/*
 * Destroy one vginfo: drop its cached metadata, repair the same-name
 * chain (re-point the _vgname_hash head or unlink from a predecessor's
 * ->next), free owned strings, remove the vgid hash entry if it still
 * maps here, and unlink from the global _vginfos list.
 * NOTE(review): fragmented listing - chain-walk loop header and returns
 * are missing from this view.
 */
991 static int _free_vginfo(struct lvmcache_vginfo
*vginfo
)
993 struct lvmcache_vginfo
*primary_vginfo
, *vginfo2
;
996 _free_cached_vgmetadata(vginfo
);
998 vginfo2
= primary_vginfo
= lvmcache_vginfo_from_vgname(vginfo
->vgname
, NULL
);
1000 if (vginfo
== primary_vginfo
) {
1001 dm_hash_remove(_vgname_hash
, vginfo
->vgname
);
1002 if (vginfo
->next
&& !dm_hash_insert(_vgname_hash
, vginfo
->vgname
,
1004 log_error("_vgname_hash re-insertion for %s failed",
1009 if (vginfo2
->next
== vginfo
) {
1010 vginfo2
->next
= vginfo
->next
;
1013 while ((vginfo2
= vginfo2
->next
));
1015 dm_free(vginfo
->vgname
);
1016 dm_free(vginfo
->creation_host
);
1018 if (*vginfo
->vgid
&& _vgid_hash
&&
1019 lvmcache_vginfo_from_vgid(vginfo
->vgid
) == vginfo
)
1020 dm_hash_remove(_vgid_hash
, vginfo
->vgid
);
1022 dm_list_del(&vginfo
->list
);
1030 * vginfo must be info->vginfo unless info is NULL
/*
 * Detach an info from its vginfo and free the vginfo when it is no
 * longer referenced (not orphan and member list now empty).
 */
1032 static int _drop_vginfo(struct lvmcache_info
*info
, struct lvmcache_vginfo
*vginfo
)
1035 _vginfo_detach_info(info
);
1037 /* vginfo still referenced? */
1038 if (!vginfo
|| is_orphan_vg(vginfo
->vgname
) ||
1039 !dm_list_empty(&vginfo
->infos
))
1042 if (!_free_vginfo(vginfo
))
/*
 * Remove a PV info from the cache entirely: pvid hash entry, vginfo
 * membership, and its label (destroyed via the labeller's ops).
 */
1049 void lvmcache_del(struct lvmcache_info *info)
1051 if (info->dev->pvid[0] && _pvid_hash)
1052 dm_hash_remove(_pvid_hash, info->dev->pvid);
1054 _drop_vginfo(info, info->vginfo);
1056 info->label->labeller->ops->destroy_label(info->label->labeller,
/*
 * Re-key a PV info in _pvid_hash under a new pvid, updating the pvid
 * stored on the device.  No-op if the entry is already stored under
 * the same pvid.
 * NOTE(review): dev->pvid is copied with strncpy - NUL-termination
 * relies on the buffer size elsewhere; confirm against struct device.
 */
1063 static int _lvmcache_update_pvid(struct lvmcache_info
*info
, const char *pvid
)
1066 * Nothing to do if already stored with same pvid.
1069 if (((dm_hash_lookup(_pvid_hash
, pvid
)) == info
) &&
1070 !strcmp(info
->dev
->pvid
, pvid
))
1072 if (*info
->dev
->pvid
)
1073 dm_hash_remove(_pvid_hash
, info
->dev
->pvid
);
1074 strncpy(info
->dev
->pvid
, pvid
, sizeof(info
->dev
->pvid
));
1075 if (!dm_hash_insert(_pvid_hash
, pvid
, info
)) {
1076 log_error("_lvmcache_update: pvid insertion failed: %s", pvid
);
1084 * vginfo must be info->vginfo unless info is NULL (orphans)
/*
 * Re-key a vginfo in _vgid_hash under a new vgid.  No-op when the vgid
 * is unchanged (or vgid/vginfo are NULL).  The new vgid is copied into
 * vginfo->vgid with explicit NUL-termination before insertion.
 */
1086 static int _lvmcache_update_vgid(struct lvmcache_info
*info
,
1087 struct lvmcache_vginfo
*vginfo
,
1090 if (!vgid
|| !vginfo
||
1091 !strncmp(vginfo
->vgid
, vgid
, ID_LEN
))
1094 if (vginfo
&& *vginfo
->vgid
)
1095 dm_hash_remove(_vgid_hash
, vginfo
->vgid
);
1097 /* FIXME: unreachable code path */
1098 log_debug("lvmcache: %s: clearing VGID", info
? dev_name(info
->dev
) : vginfo
->vgname
);
1102 strncpy(vginfo
->vgid
, vgid
, ID_LEN
);
1103 vginfo
->vgid
[ID_LEN
] = '\0';
1104 if (!dm_hash_insert(_vgid_hash
, vginfo
->vgid
, vginfo
)) {
1105 log_error("_lvmcache_update: vgid hash insertion failed: %s",
1110 if (!is_orphan_vg(vginfo
->vgname
))
1111 log_debug("lvmcache: %s: setting %s VGID to %s",
1112 (info
) ? dev_name(info
->dev
) : "",
1113 vginfo
->vgname
, vginfo
->vgid
);
/*
 * Insert a new vginfo into _vgname_hash, resolving duplicate VG names.
 * Precedence rules (see inline comment): an existing unexported VG
 * beats an exported newcomer; an exported existing VG yields to an
 * unexported newcomer; creation_host matching this machine decides the
 * remaining ties.  The loser is kept on the primary's ->next chain
 * (appended at the tail, or the old primary becomes ->next of the new
 * one when the hash entry is swapped).
 * NOTE(review): fragmented listing - the branch selecting between the
 * chain-append and hash-swap paths, and returns, are missing from view.
 */
1118 static int _insert_vginfo(struct lvmcache_vginfo
*new_vginfo
, const char *vgid
,
1119 uint32_t vgstatus
, const char *creation_host
,
1120 struct lvmcache_vginfo
*primary_vginfo
)
1122 struct lvmcache_vginfo
*last_vginfo
= primary_vginfo
;
1123 char uuid_primary
[64] __attribute__((aligned(8)));
1124 char uuid_new
[64] __attribute__((aligned(8)));
1127 /* Pre-existing VG takes precedence. Unexported VG takes precedence. */
1128 if (primary_vginfo
) {
1129 if (!id_write_format((const struct id
*)vgid
, uuid_new
, sizeof(uuid_new
)))
1132 if (!id_write_format((const struct id
*)&primary_vginfo
->vgid
, uuid_primary
,
1133 sizeof(uuid_primary
)))
1137 * If Primary not exported, new exported => keep
1138 * Else Primary exported, new not exported => change
1139 * Else Primary has hostname for this machine => keep
1140 * Else Primary has no hostname, new has one => change
1141 * Else New has hostname for this machine => change
1142 * Else Keep primary.
1144 if (!(primary_vginfo
->status
& EXPORTED_VG
) &&
1145 (vgstatus
& EXPORTED_VG
))
1146 log_warn("WARNING: Duplicate VG name %s: "
1147 "Existing %s takes precedence over "
1148 "exported %s", new_vginfo
->vgname
,
1149 uuid_primary
, uuid_new
);
1150 else if ((primary_vginfo
->status
& EXPORTED_VG
) &&
1151 !(vgstatus
& EXPORTED_VG
)) {
1152 log_warn("WARNING: Duplicate VG name %s: "
1153 "%s takes precedence over exported %s",
1154 new_vginfo
->vgname
, uuid_new
,
1157 } else if (primary_vginfo
->creation_host
&&
1158 !strcmp(primary_vginfo
->creation_host
,
1159 primary_vginfo
->fmt
->cmd
->hostname
))
1160 log_warn("WARNING: Duplicate VG name %s: "
1161 "Existing %s (created here) takes precedence "
1162 "over %s", new_vginfo
->vgname
, uuid_primary
,
1164 else if (!primary_vginfo
->creation_host
&& creation_host
) {
1165 log_warn("WARNING: Duplicate VG name %s: "
1166 "%s (with creation_host) takes precedence over %s",
1167 new_vginfo
->vgname
, uuid_new
,
1170 } else if (creation_host
&&
1171 !strcmp(creation_host
,
1172 primary_vginfo
->fmt
->cmd
->hostname
)) {
1173 log_warn("WARNING: Duplicate VG name %s: "
1174 "%s (created here) takes precedence over %s",
1175 new_vginfo
->vgname
, uuid_new
,
1181 while (last_vginfo
->next
)
1182 last_vginfo
= last_vginfo
->next
;
1183 last_vginfo
->next
= new_vginfo
;
1187 dm_hash_remove(_vgname_hash
, primary_vginfo
->vgname
);
1190 if (!dm_hash_insert(_vgname_hash
, new_vginfo
->vgname
, new_vginfo
)) {
1191 log_error("cache_update: vg hash insertion failed: %s",
1192 new_vginfo
->vgname
);
1197 new_vginfo
->next
= primary_vginfo
;
/*
 * Move a PV info into the vginfo for 'vgname', creating the vginfo if
 * needed.  No-op when the info is already in a VG of that name.  While
 * scanning, an invalidated same-name primary vginfo is dismantled
 * first: its infos are handed back to the format's orphan vginfo and
 * the stale entry is dropped, avoiding bogus duplicate-VG warnings.
 * The new vginfo is then chained in via _insert_vginfo, appended to
 * _vginfos (orphans last so they iterate last), the info attached (or,
 * for the info==NULL orphan path, the vgid recorded), and the lock
 * state refreshed.  The large commented-out section documents a known
 * FIXME: VG renames currently duplicate the vginfo instead of renaming.
 * NOTE(review): fragmented listing - 'mdabuf' declaration, several
 * braces and returns are missing from this view.
 */
1202 static int _lvmcache_update_vgname(struct lvmcache_info
*info
,
1203 const char *vgname
, const char *vgid
,
1204 uint32_t vgstatus
, const char *creation_host
,
1205 const struct format_type
*fmt
)
1207 struct lvmcache_vginfo
*vginfo
, *primary_vginfo
, *orphan_vginfo
;
1208 struct lvmcache_info
*info2
, *info3
;
1210 // struct lvmcache_vginfo *old_vginfo, *next;
1212 if (!vgname
|| (info
&& info
->vginfo
&& !strcmp(info
->vginfo
->vgname
, vgname
)))
1215 /* Remove existing vginfo entry */
1217 _drop_vginfo(info
, info
->vginfo
);
1219 /* Get existing vginfo or create new one */
1220 if (!(vginfo
= lvmcache_vginfo_from_vgname(vgname
, vgid
))) {
1221 /*** FIXME - vginfo ends up duplicated instead of renamed.
1222 // Renaming? This lookup fails.
1223 if ((vginfo = vginfo_from_vgid(vgid))) {
1224 next = vginfo->next;
1225 old_vginfo = vginfo_from_vgname(vginfo->vgname, NULL);
1226 if (old_vginfo == vginfo) {
1227 dm_hash_remove(_vgname_hash, old_vginfo->vgname);
1228 if (old_vginfo->next) {
1229 if (!dm_hash_insert(_vgname_hash, old_vginfo->vgname, old_vginfo->next)) {
1230 log_error("vg hash re-insertion failed: %s",
1231 old_vginfo->vgname);
1236 if (old_vginfo->next == vginfo) {
1237 old_vginfo->next = vginfo->next;
1240 } while ((old_vginfo = old_vginfo->next));
1241 vginfo->next = NULL;
1243 dm_free(vginfo->vgname);
1244 if (!(vginfo->vgname = dm_strdup(vgname))) {
1245 log_error("cache vgname alloc failed for %s", vgname);
1249 // Rename so can assume new name does not already exist
1250 if (!dm_hash_insert(_vgname_hash, vginfo->vgname, vginfo->next)) {
1251 log_error("vg hash re-insertion failed: %s",
1257 if (!(vginfo
= dm_zalloc(sizeof(*vginfo
)))) {
1258 log_error("lvmcache_update_vgname: list alloc failed");
1261 if (!(vginfo
->vgname
= dm_strdup(vgname
))) {
1263 log_error("cache vgname alloc failed for %s", vgname
);
1266 dm_list_init(&vginfo
->infos
);
1269 * If we're scanning and there's an invalidated entry, remove it.
1270 * Otherwise we risk bogus warnings of duplicate VGs.
1272 while ((primary_vginfo
= lvmcache_vginfo_from_vgname(vgname
, NULL
)) &&
1273 _scanning_in_progress
&& _vginfo_is_invalid(primary_vginfo
)) {
1274 orphan_vginfo
= lvmcache_vginfo_from_vgname(primary_vginfo
->fmt
->orphan_vg_name
, NULL
);
1275 if (!orphan_vginfo
) {
1276 log_error(INTERNAL_ERROR
"Orphan vginfo %s lost from cache.",
1277 primary_vginfo
->fmt
->orphan_vg_name
);
1278 dm_free(vginfo
->vgname
);
1282 dm_list_iterate_items_safe(info2
, info3
, &primary_vginfo
->infos
) {
1283 _vginfo_detach_info(info2
);
1284 _vginfo_attach_info(orphan_vginfo
, info2
);
1286 sprintf(mdabuf
, " with %u mdas",
1287 dm_list_size(&info2
->mdas
));
1290 log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
1291 dev_name(info2
->dev
),
1292 vgname
, orphan_vginfo
->vgid
[0] ? " (" : "",
1293 orphan_vginfo
->vgid
[0] ? orphan_vginfo
->vgid
: "",
1294 orphan_vginfo
->vgid
[0] ? ")" : "", mdabuf
);
1297 if (!_drop_vginfo(NULL
, primary_vginfo
))
1301 if (!_insert_vginfo(vginfo
, vgid
, vgstatus
, creation_host
,
1303 dm_free(vginfo
->vgname
);
1307 /* Ensure orphans appear last on list_iterate */
1308 if (is_orphan_vg(vgname
))
1309 dm_list_add(&_vginfos
, &vginfo
->list
);
1311 dm_list_add_h(&_vginfos
, &vginfo
->list
);
1318 _vginfo_attach_info(vginfo
, info
);
1319 else if (!_lvmcache_update_vgid(NULL
, vginfo
, vgid
)) /* Orphans */
1322 _update_cache_vginfo_lock_state(vginfo
, lvmcache_vgname_is_locked(vgname
));
1324 /* FIXME Check consistency of list! */
1329 sprintf(mdabuf
, " with %u mdas", dm_list_size(&info
->mdas
));
1332 log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
1333 dev_name(info
->dev
),
1334 vgname
, vginfo
->vgid
[0] ? " (" : "",
1335 vginfo
->vgid
[0] ? vginfo
->vgid
: "",
1336 vginfo
->vgid
[0] ? ")" : "", mdabuf
);
1338 log_debug("lvmcache: initialised VG %s", vgname
);
/*
 * Record a VG's status flags and creation host on its vginfo, logging
 * EXPORTED_VG transitions.  The creation_host string is replaced only
 * when it actually differs from what is cached.
 */
1343 static int _lvmcache_update_vgstatus(struct lvmcache_info
*info
, uint32_t vgstatus
,
1344 const char *creation_host
)
1346 if (!info
|| !info
->vginfo
)
1349 if ((info
->vginfo
->status
& EXPORTED_VG
) != (vgstatus
& EXPORTED_VG
))
1350 log_debug("lvmcache: %s: VG %s %s exported",
1351 dev_name(info
->dev
), info
->vginfo
->vgname
,
1352 vgstatus
& EXPORTED_VG
? "now" : "no longer");
1354 info
->vginfo
->status
= vgstatus
;
1359 if (info
->vginfo
->creation_host
&& !strcmp(creation_host
,
1360 info
->vginfo
->creation_host
))
1363 if (info
->vginfo
->creation_host
)
1364 dm_free(info
->vginfo
->creation_host
);
1366 if (!(info
->vginfo
->creation_host
= dm_strdup(creation_host
))) {
1367 log_error("cache creation host alloc failed for %s",
1372 log_debug("lvmcache: %s: VG %s: Set creation host to %s.",
1373 dev_name(info
->dev
), info
->vginfo
->vgname
, creation_host
);
/*
 * Create a vginfo for a format's orphan VG (name doubles as its vgid),
 * initialising the cache first if necessary.
 */
1378 int lvmcache_add_orphan_vginfo(const char *vgname
, struct format_type
*fmt
)
1380 if (!_lock_hash
&& !lvmcache_init()) {
1381 log_error("Internal cache initialisation failed");
1385 return _lvmcache_update_vgname(NULL
, vgname
, vgname
, 0, "", fmt
);
1388 int lvmcache_update_vgname_and_id(struct lvmcache_info
*info
,
1389 const char *vgname
, const char *vgid
,
1390 uint32_t vgstatus
, const char *creation_host
)
1392 if (!vgname
&& !info
->vginfo
) {
1393 log_error(INTERNAL_ERROR
"NULL vgname handed to cache");
1394 /* FIXME Remove this */
1395 vgname
= info
->fmt
->orphan_vg_name
;
1399 /* When using lvmetad, the PV could not have become orphaned. */
1400 if (lvmetad_active() && is_orphan_vg(vgname
) && info
->vginfo
)
1403 /* If PV without mdas is already in a real VG, don't make it orphan */
1404 if (is_orphan_vg(vgname
) && info
->vginfo
&&
1405 mdas_empty_or_ignored(&info
->mdas
) &&
1406 !is_orphan_vg(info
->vginfo
->vgname
) && critical_section())
1409 /* If moving PV from orphan to real VG, always mark it valid */
1410 if (!is_orphan_vg(vgname
))
1411 info
->status
&= ~CACHE_INVALID
;
1413 if (!_lvmcache_update_vgname(info
, vgname
, vgid
, vgstatus
,
1414 creation_host
, info
->fmt
) ||
1415 !_lvmcache_update_vgid(info
, info
->vginfo
, vgid
) ||
1416 !_lvmcache_update_vgstatus(info
, vgstatus
, creation_host
))
1422 int lvmcache_update_vg(struct volume_group
*vg
, unsigned precommitted
)
1424 struct pv_list
*pvl
;
1425 struct lvmcache_info
*info
;
1426 char pvid_s
[ID_LEN
+ 1] __attribute__((aligned(8)));
1428 pvid_s
[sizeof(pvid_s
) - 1] = '\0';
1430 dm_list_iterate_items(pvl
, &vg
->pvs
) {
1431 strncpy(pvid_s
, (char *) &pvl
->pv
->id
, sizeof(pvid_s
) - 1);
1432 /* FIXME Could pvl->pv->dev->pvid ever be different? */
1433 if ((info
= lvmcache_info_from_pvid(pvid_s
, 0)) &&
1434 !lvmcache_update_vgname_and_id(info
, vg
->name
,
1440 /* store text representation of vg to cache */
1441 if (vg
->cmd
->current_settings
.cache_vgmetadata
)
1442 _store_metadata(vg
, precommitted
);
1447 struct lvmcache_info
*lvmcache_add(struct labeller
*labeller
, const char *pvid
,
1449 const char *vgname
, const char *vgid
,
1452 struct label
*label
;
1453 struct lvmcache_info
*existing
, *info
;
1454 char pvid_s
[ID_LEN
+ 1] __attribute__((aligned(8)));
1456 if (!_vgname_hash
&& !lvmcache_init()) {
1457 log_error("Internal cache initialisation failed");
1461 strncpy(pvid_s
, pvid
, sizeof(pvid_s
) - 1);
1462 pvid_s
[sizeof(pvid_s
) - 1] = '\0';
1464 if (!(existing
= lvmcache_info_from_pvid(pvid_s
, 0)) &&
1465 !(existing
= lvmcache_info_from_pvid(dev
->pvid
, 0))) {
1466 if (!(label
= label_create(labeller
)))
1468 if (!(info
= dm_zalloc(sizeof(*info
)))) {
1469 log_error("lvmcache_info allocation failed");
1470 label_destroy(label
);
1475 info
->label
= label
;
1476 dm_list_init(&info
->list
);
1479 lvmcache_del_mdas(info
);
1480 lvmcache_del_das(info
);
1482 if (existing
->dev
!= dev
) {
1483 /* Is the existing entry a duplicate pvid e.g. md ? */
1484 if (dev_subsystem_part_major(existing
->dev
) &&
1485 !dev_subsystem_part_major(dev
)) {
1486 log_very_verbose("Ignoring duplicate PV %s on "
1488 pvid
, dev_name(dev
),
1489 dev_subsystem_name(existing
->dev
),
1490 dev_name(existing
->dev
));
1492 } else if (dm_is_dm_major(MAJOR(existing
->dev
->dev
)) &&
1493 !dm_is_dm_major(MAJOR(dev
->dev
))) {
1494 log_very_verbose("Ignoring duplicate PV %s on "
1496 pvid
, dev_name(dev
),
1497 dev_name(existing
->dev
));
1499 } else if (!dev_subsystem_part_major(existing
->dev
) &&
1500 dev_subsystem_part_major(dev
))
1501 log_very_verbose("Duplicate PV %s on %s - "
1502 "using %s %s", pvid
,
1503 dev_name(existing
->dev
),
1504 dev_subsystem_name(existing
->dev
),
1506 else if (!dm_is_dm_major(MAJOR(existing
->dev
->dev
)) &&
1507 dm_is_dm_major(MAJOR(dev
->dev
)))
1508 log_very_verbose("Duplicate PV %s on %s - "
1509 "using dm %s", pvid
,
1510 dev_name(existing
->dev
),
1512 /* FIXME If both dm, check dependencies */
1513 //else if (dm_is_dm_major(MAJOR(existing->dev->dev)) &&
1514 //dm_is_dm_major(MAJOR(dev->dev)))
1516 else if (!strcmp(pvid_s
, existing
->dev
->pvid
))
1517 log_error("Found duplicate PV %s: using %s not "
1518 "%s", pvid
, dev_name(dev
),
1519 dev_name(existing
->dev
));
1521 if (strcmp(pvid_s
, existing
->dev
->pvid
))
1522 log_debug("Updating pvid cache to %s (%s) from %s (%s)",
1523 pvid_s
, dev_name(dev
),
1524 existing
->dev
->pvid
, dev_name(existing
->dev
));
1525 /* Switch over to new preferred device */
1526 existing
->dev
= dev
;
1528 /* Has labeller changed? */
1529 if (info
->label
->labeller
!= labeller
) {
1530 label_destroy(info
->label
);
1531 if (!(info
->label
= label_create(labeller
)))
1532 /* FIXME leaves info without label! */
1534 info
->label
->info
= info
;
1536 label
= info
->label
;
1539 info
->fmt
= (const struct format_type
*) labeller
->private;
1540 info
->status
|= CACHE_INVALID
;
1542 if (!_lvmcache_update_pvid(info
, pvid_s
)) {
1545 label_destroy(label
);
1550 if (!lvmcache_update_vgname_and_id(info
, vgname
, vgid
, vgstatus
, NULL
)) {
1552 dm_hash_remove(_pvid_hash
, pvid_s
);
1553 strcpy(info
->dev
->pvid
, "");
1555 label_destroy(label
);
1563 static void _lvmcache_destroy_entry(struct lvmcache_info
*info
)
1565 _vginfo_detach_info(info
);
1566 strcpy(info
->dev
->pvid
, "");
1567 label_destroy(info
->label
);
1571 static void _lvmcache_destroy_vgnamelist(struct lvmcache_vginfo
*vginfo
)
1573 struct lvmcache_vginfo
*next
;
1576 next
= vginfo
->next
;
1577 if (!_free_vginfo(vginfo
))
1579 } while ((vginfo
= next
));
1582 static void _lvmcache_destroy_lockname(struct dm_hash_node
*n
)
1586 if (!dm_hash_get_data(_lock_hash
, n
))
1589 vgname
= dm_hash_get_key(_lock_hash
, n
);
1591 if (!strcmp(vgname
, VG_GLOBAL
))
1592 _vg_global_lock_held
= 1;
1594 log_error(INTERNAL_ERROR
"Volume Group %s was not unlocked",
1595 dm_hash_get_key(_lock_hash
, n
));
1598 void lvmcache_destroy(struct cmd_context
*cmd
, int retain_orphans
)
1600 struct dm_hash_node
*n
;
1601 log_verbose("Wiping internal VG cache");
1606 dm_hash_destroy(_vgid_hash
);
1611 dm_hash_iter(_pvid_hash
, (dm_hash_iterate_fn
) _lvmcache_destroy_entry
);
1612 dm_hash_destroy(_pvid_hash
);
1617 dm_hash_iter(_vgname_hash
,
1618 (dm_hash_iterate_fn
) _lvmcache_destroy_vgnamelist
);
1619 dm_hash_destroy(_vgname_hash
);
1620 _vgname_hash
= NULL
;
1624 dm_hash_iterate(n
, _lock_hash
)
1625 _lvmcache_destroy_lockname(n
);
1626 dm_hash_destroy(_lock_hash
);
1630 if (!dm_list_empty(&_vginfos
))
1631 log_error(INTERNAL_ERROR
"_vginfos list should be empty");
1632 dm_list_init(&_vginfos
);
1635 if (!init_lvmcache_orphans(cmd
))
1639 int lvmcache_pvid_is_locked(const char *pvid
) {
1640 struct lvmcache_info
*info
;
1641 info
= lvmcache_info_from_pvid(pvid
, 0);
1642 if (!info
|| !info
->vginfo
)
1645 return lvmcache_vgname_is_locked(info
->vginfo
->vgname
);
1648 int lvmcache_fid_add_mdas(struct lvmcache_info
*info
, struct format_instance
*fid
,
1649 const char *id
, int id_len
)
1651 return fid_add_mdas(fid
, &info
->mdas
, id
, id_len
);
1654 int lvmcache_fid_add_mdas_pv(struct lvmcache_info
*info
, struct format_instance
*fid
)
1656 return lvmcache_fid_add_mdas(info
, fid
, info
->dev
->pvid
, ID_LEN
);
1659 int lvmcache_fid_add_mdas_vg(struct lvmcache_vginfo
*vginfo
, struct format_instance
*fid
)
1661 struct lvmcache_info
*info
;
1662 dm_list_iterate_items(info
, &vginfo
->infos
) {
1663 if (!lvmcache_fid_add_mdas_pv(info
, fid
))
1669 static int _get_pv_if_in_vg(struct lvmcache_info
*info
,
1670 struct physical_volume
*pv
)
1672 char vgname
[NAME_LEN
+ 1];
1673 char vgid
[ID_LEN
+ 1];
1675 if (info
->vginfo
&& info
->vginfo
->vgname
&&
1676 !is_orphan_vg(info
->vginfo
->vgname
)) {
1678 * get_pv_from_vg_by_id() may call
1679 * lvmcache_label_scan() and drop cached
1680 * vginfo so make a local copy of string.
1682 strcpy(vgname
, info
->vginfo
->vgname
);
1683 memcpy(vgid
, info
->vginfo
->vgid
, sizeof(vgid
));
1685 if (get_pv_from_vg_by_id(info
->fmt
, vgname
, vgid
,
1686 info
->dev
->pvid
, pv
))
1693 int lvmcache_populate_pv_fields(struct lvmcache_info
*info
,
1694 struct physical_volume
*pv
,
1695 int scan_label_only
)
1697 struct data_area_list
*da
;
1699 /* Have we already cached vgname? */
1700 if (!scan_label_only
&& _get_pv_if_in_vg(info
, pv
))
1703 /* Perform full scan (just the first time) and try again */
1704 if (!scan_label_only
&& !critical_section() && !full_scan_done()) {
1705 lvmcache_label_scan(info
->fmt
->cmd
, 2);
1707 if (_get_pv_if_in_vg(info
, pv
))
1712 pv
->dev
= info
->dev
;
1713 pv
->fmt
= info
->fmt
;
1714 pv
->size
= info
->device_size
>> SECTOR_SHIFT
;
1715 pv
->vg_name
= FMT_TEXT_ORPHAN_VG_NAME
;
1716 memcpy(&pv
->id
, &info
->dev
->pvid
, sizeof(pv
->id
));
1718 /* Currently only support exactly one data area */
1719 if (dm_list_size(&info
->das
) != 1) {
1720 log_error("Must be exactly one data area (found %d) on PV %s",
1721 dm_list_size(&info
->das
), dev_name(info
->dev
));
1725 dm_list_iterate_items(da
, &info
->das
)
1726 pv
->pe_start
= da
->disk_locn
.offset
>> SECTOR_SHIFT
;
1731 int lvmcache_check_format(struct lvmcache_info
*info
, const struct format_type
*fmt
)
1733 if (info
->fmt
!= fmt
) {
1734 log_error("PV %s is a different format (seqno %s)",
1735 dev_name(info
->dev
), info
->fmt
->name
);
1741 void lvmcache_del_mdas(struct lvmcache_info
*info
)
1744 del_mdas(&info
->mdas
);
1745 dm_list_init(&info
->mdas
);
1748 void lvmcache_del_das(struct lvmcache_info
*info
)
1751 del_das(&info
->das
);
1752 dm_list_init(&info
->das
);
1755 int lvmcache_add_mda(struct lvmcache_info
*info
, struct device
*dev
,
1756 uint64_t start
, uint64_t size
, unsigned ignored
)
1758 return add_mda(info
->fmt
, NULL
, &info
->mdas
, dev
, start
, size
, ignored
);
1761 int lvmcache_add_da(struct lvmcache_info
*info
, uint64_t start
, uint64_t size
)
1763 return add_da(NULL
, &info
->das
, start
, size
);
1767 void lvmcache_update_pv(struct lvmcache_info
*info
, struct physical_volume
*pv
,
1768 const struct format_type
*fmt
)
1770 info
->device_size
= pv
->size
<< SECTOR_SHIFT
;
1774 int lvmcache_update_das(struct lvmcache_info
*info
, struct physical_volume
*pv
)
1776 struct data_area_list
*da
;
1779 dm_list_iterate_items(da
, &info
->das
)
1780 pv
->pe_start
= da
->disk_locn
.offset
>> SECTOR_SHIFT
;
1781 del_das(&info
->das
);
1783 dm_list_init(&info
->das
);
1785 if (!add_da(NULL
, &info
->das
, pv
->pe_start
<< SECTOR_SHIFT
, 0 /*pv->size << SECTOR_SHIFT*/))
1791 int lvmcache_foreach_pv(struct lvmcache_vginfo
*vginfo
,
1792 int (*fun
)(struct lvmcache_info
*, void *),
1795 struct lvmcache_info
*info
;
1796 dm_list_iterate_items(info
, &vginfo
->infos
) {
1797 if (!fun(info
, baton
))
1804 int lvmcache_foreach_mda(struct lvmcache_info
*info
,
1805 int (*fun
)(struct metadata_area
*, void *),
1808 struct metadata_area
*mda
;
1809 dm_list_iterate_items(mda
, &info
->mdas
) {
1810 if (!fun(mda
, baton
))
1817 int lvmcache_mda_count(struct lvmcache_info
*info
)
1819 return dm_list_size(&info
->mdas
);
1822 int lvmcache_foreach_da(struct lvmcache_info
*info
,
1823 int (*fun
)(struct disk_locn
*, void *),
1826 struct data_area_list
*da
;
1827 dm_list_iterate_items(da
, &info
->das
) {
1828 if (!fun(&da
->disk_locn
, baton
))
1836 * The lifetime of the label returned is tied to the lifetime of the
1837 * lvmcache_info which is the same as lvmcache itself.
1839 struct label
*lvmcache_get_label(struct lvmcache_info
*info
) {
1843 void lvmcache_make_valid(struct lvmcache_info
*info
) {
1844 info
->status
&= ~CACHE_INVALID
;
1847 uint64_t lvmcache_device_size(struct lvmcache_info
*info
) {
1848 return info
->device_size
;
1851 void lvmcache_set_device_size(struct lvmcache_info
*info
, uint64_t size
) {
1852 info
->device_size
= size
;
1855 struct device
*lvmcache_device(struct lvmcache_info
*info
) {
1859 int lvmcache_is_orphan(struct lvmcache_info
*info
) {
1861 return 1; /* FIXME? */
1862 return is_orphan_vg(info
->vginfo
->vgname
);
1865 int lvmcache_vgid_is_cached(const char *vgid
) {
1866 struct lvmcache_vginfo
*vginfo
;
1868 if (lvmetad_active())
1871 vginfo
= lvmcache_vginfo_from_vgid(vgid
);
1873 if (!vginfo
|| !vginfo
->vgname
)
1876 if (is_orphan_vg(vginfo
->vgname
))
1883 * Return true iff it is impossible to find out from this info alone whether the
1884 * PV in question is or is not an orphan.
1886 int lvmcache_uncertain_ownership(struct lvmcache_info
*info
) {
1887 return mdas_empty_or_ignored(&info
->mdas
);
1890 int lvmcache_smallest_mda_size(struct lvmcache_info
*info
)
1892 return find_min_mda_size(&info
->mdas
);
1895 const struct format_type
*lvmcache_fmt(struct lvmcache_info
*info
) {