[PATCH v4 5/6] aarch64: Add sysv specific enabling code for memory tagging
Siddhesh Poyarekar
siddhesh@gotplt.org
Mon Dec 21 13:36:29 GMT 2020
On 12/19/20 12:59 AM, Richard Earnshaw via Libc-alpha wrote:
>
> Add various defines and stubs for enabling MTE on AArch64 sysv-like
> systems such as Linux. The HWCAP feature bit is copied over in the
> same way as other feature bits. Similarly we add a new wrapper header
> for mman.h to define the PROT_MTE flag that can be used with mmap and
> related functions.
>
> We add a new field to struct cpu_features that can be used, for
> example, to check whether or not certain ifunc'd routines should be
> bound to MTE-safe versions.
>
> Finally, if we detect that MTE should be enabled (i.e. via the glibc
> tunable), we enable MTE during startup as required.
> ---
> sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h | 1 +
> sysdeps/unix/sysv/linux/aarch64/bits/mman.h | 1 +
> .../unix/sysv/linux/aarch64/cpu-features.c | 30 +++++++++++++++++++
> .../unix/sysv/linux/aarch64/cpu-features.h | 2 ++
> 4 files changed, 34 insertions(+)
Looks OK to me.
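
One note for anyone trying this out: the new HWCAP2 bit ends up visible
through the auxiliary vector like the existing ones, so outside the
dynamic loader the equivalent probe is just a getauxval call.  A rough
sketch (not glibc code; the fallback define simply mirrors the value the
patch adds below):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_MTE
# define HWCAP2_MTE (1 << 18)   /* Mirrors the bits/hwcap.h addition below.  */
#endif

int
main (void)
{
  if (getauxval (AT_HWCAP2) & HWCAP2_MTE)
    puts ("MTE reported via AT_HWCAP2");
  else
    puts ("MTE not reported");
  return 0;
}
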
> diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
> index af90d8a626..389852f1d9 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
> @@ -73,3 +73,4 @@
> #define HWCAP2_DGH (1 << 15)
> #define HWCAP2_RNG (1 << 16)
> #define HWCAP2_BTI (1 << 17)
> +#define HWCAP2_MTE (1 << 18)
> diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> index ecae046344..c5ec0aa7d0 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> @@ -24,6 +24,7 @@
> arch/arm64/include/uapi/asm/mman.h. */
>
> #define PROT_BTI 0x10
> +#define PROT_MTE 0x20
>
> #include <bits/mman-map-flags-generic.h>
>
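Similarly, PROT_MTE is just an extra protection bit, so a tagging
allocator would request checked memory along these lines (an
illustrative sketch only, with error handling kept minimal; the
fallback define mirrors the value added above):

#include <stddef.h>
#include <sys/mman.h>

#ifndef PROT_MTE
# define PROT_MTE 0x20          /* Mirrors the bits/mman.h addition above.  */
#endif

/* Map SIZE bytes whose loads and stores are tag checked.  The memory
   starts out with tag 0; tags are assigned later with the STG family
   of instructions.  */
static void *
map_tagged (size_t size)
{
  void *p = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_MTE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? NULL : p;
}
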
> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> index b9ab827aca..bd899c4b09 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> @@ -19,10 +19,17 @@
> #include <cpu-features.h>
> #include <sys/auxv.h>
> #include <elf/dl-hwcaps.h>
> +#include <sys/prctl.h>
>
> #define DCZID_DZP_MASK (1 << 4)
> #define DCZID_BS_MASK (0xf)
>
> +/* The maximal set of permitted tags that the MTE random tag generation
> + instruction may use. We exclude tag 0 because a) we want to reserve
> + that for the libc heap structures and b) it makes it easier to see
> + when pointers have been correctly tagged. */
> +#define MTE_ALLOWED_TAGS (0xfffe << PR_MTE_TAG_SHIFT)
A Nice(TM) looking macro to mask the beautiful hex that should stay
hidden ;)
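
If I'm reading the kernel's prctl.h right, PR_MTE_TAG_SHIFT is 3, so
this works out to 0xfffe << 3 == 0x7fff0: the include mask covers tags
1-15 and leaves tag 0 out, exactly as the comment says.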
> +
> #if HAVE_TUNABLES
> struct cpu_list
> {
> @@ -86,4 +93,27 @@ init_cpu_features (struct cpu_features *cpu_features)
>
> /* Check if BTI is supported. */
> cpu_features->bti = GLRO (dl_hwcap2) & HWCAP2_BTI;
> +
> + /* Setup memory tagging support if the HW and kernel support it, and if
> + the user has requested it. */
> + cpu_features->mte_state = 0;
> +
> +#ifdef USE_MTAG
> +# if HAVE_TUNABLES
> + int mte_state = TUNABLE_GET (glibc, mem, tagging, unsigned, 0);
> + cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ? mte_state : 0;
> + /* If we lack the MTE feature, disable the tunable, since it will
> + otherwise cause instructions that won't run on this CPU to be used. */
> + TUNABLE_SET (glibc, mem, tagging, unsigned, cpu_features->mte_state);
> +# endif
> +
> + if (cpu_features->mte_state & 2)
> + __prctl (PR_SET_TAGGED_ADDR_CTRL,
> + (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | MTE_ALLOWED_TAGS),
> + 0, 0, 0);
> + else if (cpu_features->mte_state)
> + __prctl (PR_SET_TAGGED_ADDR_CTRL,
> + (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | MTE_ALLOWED_TAGS),
> + 0, 0, 0);
> +#endif
> }
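One thing worth spelling out for readers of the tunable documentation:
with glibc.mem.tagging, a value with bit 1 set (e.g. 2 or 3) requests
synchronous tag check faults, while any other non-zero value gives the
asynchronous mode.  A test that wants to confirm what startup actually
selected can read the setting back with the GET counterpart of the
prctl; a rough sketch, assuming kernel headers new enough to provide
the PR_MTE_* constants:

#include <stdio.h>
#include <sys/prctl.h>

/* Print which tag check fault mode (if any) the process ended up with.  */
static void
report_mte_mode (void)
{
  int ctrl = prctl (PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  if (ctrl < 0)
    {
      perror ("prctl (PR_GET_TAGGED_ADDR_CTRL)");
      return;
    }
  if (ctrl & PR_MTE_TCF_SYNC)
    puts ("MTE: synchronous tag check faults");
  else if (ctrl & PR_MTE_TCF_ASYNC)
    puts ("MTE: asynchronous tag check faults");
  else
    puts ("MTE: tag checking not enabled");
}
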
> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> index 00a4d0c8e7..bebf321a21 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> @@ -70,6 +70,8 @@ struct cpu_features
> uint64_t midr_el1;
> unsigned zva_size;
> bool bti;
> + /* Currently, the GLIBC memory tagging tunable only defines 8 bits. */
> + uint8_t mte_state;
> };
>
> #endif /* _CPU_FEATURES_AARCH64_H */