From 0e28cfff9dfdb71352151054e0d38816856182d5 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Tue, 12 May 2020 19:01:49 +0200
Subject: [PATCH] support: Add support_blob_repeat_allocate_shared

Reviewed-by: Carlos O'Donell <carlos@redhat.com>
---
 support/blob_repeat.c             | 31 ++++++++++++++++-----
 support/blob_repeat.h             | 12 ++++++++-
 support/tst-support_blob_repeat.c | 45 ++++++++++++++++++++++---------
 3 files changed, 68 insertions(+), 20 deletions(-)

diff --git a/support/blob_repeat.c b/support/blob_repeat.c
index a7aa9bf4c7..cd6297e026 100644
--- a/support/blob_repeat.c
+++ b/support/blob_repeat.c
@@ -125,10 +125,11 @@ minimum_stride_size (size_t page_size, size_t element_size)
 }
 
 /* Allocations larger than maximum_small_size potentially use mmap
-   with alias mappings.  */
+   with alias mappings.  If SHARED, the alias mappings are created
+   using MAP_SHARED instead of MAP_PRIVATE.  */
 static struct support_blob_repeat
 allocate_big (size_t total_size, const void *element, size_t element_size,
-              size_t count)
+              size_t count, bool shared)
 {
   unsigned long page_size = xsysconf (_SC_PAGESIZE);
   size_t stride_size = minimum_stride_size (page_size, element_size);
@@ -213,7 +214,11 @@ allocate_big (size_t total_size, const void *element, size_t element_size,
   {
     size_t remaining_size = total_size;
     char *current = target;
-    int flags = MAP_FIXED | MAP_FILE | MAP_PRIVATE;
+    int flags = MAP_FIXED | MAP_FILE;
+    if (shared)
+      flags |= MAP_SHARED;
+    else
+      flags |= MAP_PRIVATE;
 #ifdef MAP_NORESERVE
     flags |= MAP_NORESERVE;
 #endif
@@ -251,8 +256,8 @@ allocate_big (size_t total_size, const void *element, size_t element_size,
 }
 
 struct support_blob_repeat
-support_blob_repeat_allocate (const void *element, size_t element_size,
-                              size_t count)
+repeat_allocate (const void *element, size_t element_size,
+                 size_t count, bool shared)
 {
   size_t total_size;
   if (__builtin_mul_overflow (element_size, count, &total_size))
@@ -263,7 +268,21 @@ support_blob_repeat_allocate (const void *element, size_t element_size,
   if (total_size <= maximum_small_size)
     return allocate_malloc (total_size, element, element_size, count);
   else
-    return allocate_big (total_size, element, element_size, count);
+    return allocate_big (total_size, element, element_size, count, shared);
+}
+
+struct support_blob_repeat
+support_blob_repeat_allocate (const void *element, size_t element_size,
+                              size_t count)
+{
+  return repeat_allocate (element, element_size, count, false);
+}
+
+struct support_blob_repeat
+support_blob_repeat_allocate_shared (const void *element, size_t element_size,
+                                     size_t count)
+{
+  return repeat_allocate (element, element_size, count, true);
 }
 
 void
diff --git a/support/blob_repeat.h b/support/blob_repeat.h
index 12f33bcd02..519458cf50 100644
--- a/support/blob_repeat.h
+++ b/support/blob_repeat.h
@@ -38,7 +38,17 @@ struct support_blob_repeat support_blob_repeat_allocate (const void *element,
                                                          size_t element_size,
                                                          size_t count);
 
-/* Deallocate the blob created by support_blob_repeat_allocate.  */
+/* Like support_blob_repeat_allocate, except that copy-on-write
+   semantics are disabled.  This means writing to one part of the blob
+   can affect other parts.  It is possible to map non-shared memory
+   over parts of the resulting blob using MAP_ANONYMOUS | MAP_FIXED
+   | MAP_PRIVATE, so that writes to these parts do not affect
+   others.  */
+struct support_blob_repeat support_blob_repeat_allocate_shared
+  (const void *element, size_t element_size, size_t count);
+
+/* Deallocate the blob created by support_blob_repeat_allocate or
+   support_blob_repeat_allocate_shared.  */
 void support_blob_repeat_free (struct support_blob_repeat *);
 
 #endif /* SUPPORT_BLOB_REPEAT_H */
diff --git a/support/tst-support_blob_repeat.c b/support/tst-support_blob_repeat.c
index a0eb9d2b89..b61d6b249a 100644
--- a/support/tst-support_blob_repeat.c
+++ b/support/tst-support_blob_repeat.c
@@ -17,6 +17,7 @@
    <http://www.gnu.org/licenses/>.  */
 
 #include <stdio.h>
+#include <string.h>
 #include <support/blob_repeat.h>
 #include <support/check.h>
 
@@ -63,21 +64,39 @@ do_test (void)
     }
   support_blob_repeat_free (&repeat);
 
-  repeat = support_blob_repeat_allocate ("012345678", 9, 10 * 1000 * 1000);
-  if (repeat.start == NULL)
-    puts ("warning: not enough memory for large mapping");
-  else
+  for (int do_shared = 0; do_shared < 2; ++do_shared)
     {
-      unsigned char *p = repeat.start;
-      for (int i = 0; i < 10 * 1000 * 1000; ++i)
-        for (int j = 0; j <= 8; ++j)
-          if (p[i * 9 + j] != '0' + j)
-            {
-              printf ("error: element %d index %d\n", i, j);
-              TEST_COMPARE (p[i * 9 + j], '0' + j);
-            }
+      if (do_shared)
+        repeat = support_blob_repeat_allocate_shared ("012345678", 9,
+                                                      10 * 1000 * 1000);
+      else
+        repeat = support_blob_repeat_allocate ("012345678", 9,
+                                               10 * 1000 * 1000);
+      if (repeat.start == NULL)
+        puts ("warning: not enough memory for large mapping");
+      else
+        {
+          unsigned char *p = repeat.start;
+          for (int i = 0; i < 10 * 1000 * 1000; ++i)
+            for (int j = 0; j <= 8; ++j)
+              if (p[i * 9 + j] != '0' + j)
+                {
+                  printf ("error: element %d index %d\n", i, j);
+                  TEST_COMPARE (p[i * 9 + j], '0' + j);
+                }
+
+          enum { total_size = 9 * 10 * 1000 * 1000 };
+          p[total_size - 1] = '\0';
+          asm ("" ::: "memory");
+          if (do_shared)
+            /* The write is repeated in multiple places earlier in the
+               string due to page sharing.  */
+            TEST_VERIFY (strlen (repeat.start) < total_size - 1);
+          else
+            TEST_COMPARE (strlen (repeat.start), total_size - 1);
+        }
+      support_blob_repeat_free (&repeat);
     }
-  support_blob_repeat_free (&repeat);
 
   return 0;
 }
-- 
2.43.5
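
A usage sketch, not part of the patch: the test program below is modeled
on the new tst-support_blob_repeat.c code and follows the usual glibc
test conventions (a do_test function plus support/test-driver.c).  It
shows the observable difference of the shared variant: a single write
near the end of a large blob becomes visible at earlier offsets,
because allocate_big maps the same pages repeatedly with MAP_SHARED.

#include <stdio.h>
#include <string.h>
#include <support/blob_repeat.h>
#include <support/check.h>

static int
do_test (void)
{
  /* 9-byte element repeated 10 million times, as in the patch's own
     test; a blob this large is mmap-based, so sharing applies.  */
  struct support_blob_repeat blob
    = support_blob_repeat_allocate_shared ("012345678", 9,
                                           10 * 1000 * 1000);
  if (blob.start == NULL)
    {
      puts ("warning: not enough memory for large mapping");
      return 0;
    }

  char *p = blob.start;
  /* Store a NUL through one alias of the underlying page.  */
  p[blob.size - 1] = '\0';
  /* Keep the compiler from assuming earlier bytes are unchanged; the
     store above is visible through the other aliases as well.  */
  asm ("" ::: "memory");
  /* The string therefore ends well before the final byte.  */
  TEST_VERIFY (strlen (p) < blob.size - 1);

  support_blob_repeat_free (&blob);
  return 0;
}

#include <support/test-driver.c>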
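
The new comment in blob_repeat.h also mentions mapping non-shared
memory over parts of a shared blob with MAP_ANONYMOUS | MAP_FIXED
| MAP_PRIVATE.  A sketch of that technique, assuming a large
(mmap-based, hence page-aligned) blob and using the xmmap wrapper from
support/xunistd.h; the helper name is illustrative, not part of the
patch:

#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <support/blob_repeat.h>
#include <support/xunistd.h>

/* Hypothetical helper: replace the first page of *BLOB with private
   anonymous memory and restore its contents, so that later writes to
   that page no longer affect the rest of the blob.  */
static void
privatize_first_page (struct support_blob_repeat *blob)
{
  size_t page_size = sysconf (_SC_PAGESIZE);
  char saved[page_size];
  memcpy (saved, blob->start, page_size);
  /* MAP_FIXED replaces the shared alias in place.  */
  void *page = xmmap (blob->start, page_size, PROT_READ | PROT_WRITE,
                      MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, -1);
  memcpy (page, saved, page_size);
  /* Writes within the first page are now private; the remainder of
     the blob still shares pages.  */
}

support_blob_repeat_free still unmaps the whole region afterwards
(munmap spans the replaced page as well), so no separate cleanup is
needed for the re-privatized part.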