From aef94fdd5e3987c3b031650f42613ab7b30e912d Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Thu, 18 Jan 2024 09:47:59 +0000
Subject: [PATCH] core-mmap: touch + check just start of each page rather than
 filling them

stress-mmap only needs to touch and check the start of each page
rather than completely filling each page and checking every value.
The main overhead is writing to freshly mapped pages, since the first
write to each new page triggers a page fault, so the win here is
small, but it is still worth making this optimization.

On an Alderlake i9-12900 with 1 CPU this works out to be ~7.6% faster
overall on a 10 second mmap stress test.

Signed-off-by: Colin Ian King
---
 core-mmap.c   | 49 ++++++++++++++++++++++++++++++++++++++++++++++++-
 core-mmap.h   |  2 ++
 stress-mmap.c |  8 ++++----
 3 files changed, 54 insertions(+), 5 deletions(-)

diff --git a/core-mmap.c b/core-mmap.c
index c379d5160..ae6429da2 100644
--- a/core-mmap.c
+++ b/core-mmap.c
@@ -31,7 +31,7 @@
  *  stress_mmap_set()
  *	set mmap'd data, touching pages in
  *	a specific pattern - check with
- *	mmap_check().
+ *	stress_mmap_check().
  */
 void OPTIMIZE3 stress_mmap_set(
 	uint8_t *buf,
@@ -154,3 +154,50 @@ int OPTIMIZE3 stress_mmap_check(
 	}
 	return 0;
 }
+
+/*
+ *  stress_mmap_set_light()
+ *	set mmap'd data, touching pages in
+ *	a specific pattern at start of each page - check with
+ *	stress_mmap_check_light().
+ */
+void OPTIMIZE3 stress_mmap_set_light(
+	uint8_t *buf,
+	const size_t sz,
+	const size_t page_size)
+{
+	register uint64_t *ptr = (uint64_t *)buf;
+	register uint64_t val = stress_mwc64();
+	register const uint64_t *end = (uint64_t *)(buf + sz);
+	register const size_t ptr_inc = page_size / sizeof(*ptr);
+
+	while (LIKELY(ptr < end)) {
+		*ptr = val;
+		ptr += ptr_inc;
+		val++;
+	}
+}
+
+/*
+ *  stress_mmap_check_light()
+ *	check if mmap'd data is sane
+ */
+int OPTIMIZE3 stress_mmap_check_light(
+	uint8_t *buf,
+	const size_t sz,
+	const size_t page_size)
+{
+	register uint64_t *ptr = (uint64_t *)buf;
+	register uint64_t val = *ptr;
+	register const uint64_t *end = (uint64_t *)(buf + sz);
+	register const size_t ptr_inc = page_size / sizeof(*ptr);
+
+	while (LIKELY(ptr < end)) {
+		if (*ptr != val)
+			return -1;
+		ptr += ptr_inc;
+		val++;
+	}
+	return 0;
+}
+
diff --git a/core-mmap.h b/core-mmap.h
index c7cdca90f..f85eb0dcd 100644
--- a/core-mmap.h
+++ b/core-mmap.h
@@ -21,5 +21,7 @@
 
 extern void stress_mmap_set(uint8_t *buf, const size_t sz, const size_t page_size);
 extern int stress_mmap_check(uint8_t *buf, const size_t sz, const size_t page_size);
+extern void stress_mmap_set_light(uint8_t *buf, const size_t sz, const size_t page_size);
+extern int stress_mmap_check_light(uint8_t *buf, const size_t sz, const size_t page_size);
 
 #endif
diff --git a/stress-mmap.c b/stress-mmap.c
index 0ebf0f629..57d6febfc 100644
--- a/stress-mmap.c
+++ b/stress-mmap.c
@@ -603,9 +603,9 @@ static int stress_mmap_child(stress_args_t *args, void *ctxt)
 		}
 
 		/* Ensure we can write to the mapped pages */
-		stress_mmap_set(buf, sz, page_size);
+		stress_mmap_set_light(buf, sz, page_size);
 		if (g_opt_flags & OPT_FLAGS_VERIFY) {
-			if (stress_mmap_check(buf, sz, page_size) < 0)
+			if (stress_mmap_check_light(buf, sz, page_size) < 0)
 				pr_fail("%s: mmap'd region of %zu bytes does "
 					"not contain expected data\n", args->name, sz);
 		}
@@ -700,8 +700,8 @@ static int stress_mmap_child(stress_args_t *args, void *ctxt)
 					page_size, page_size, context->mmap_mprotect);
 				mapped[page] = PAGE_MAPPED;
 				/* Ensure we can write to the mapped page */
-				stress_mmap_set(mappings[page], page_size, page_size);
-				if (stress_mmap_check(mappings[page], page_size, page_size) < 0)
+				stress_mmap_set_light(mappings[page], page_size, page_size);
+				if (stress_mmap_check_light(mappings[page], page_size, page_size) < 0)
 					pr_fail("%s: mmap'd region of %zu bytes does "
 						"not contain expected data\n", args->name, page_size);
 				if (mmap_file) {
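
The claim in the commit message - that the per-page fault on first write dominates, so touching one word per page captures most of the cost of filling the whole page - can be checked outside the stress-ng tree with a minimal standalone sketch along the lines below. This is not part of the patch; helper names such as fill_all() and touch_page_starts() are invented for illustration, and the only assumptions are a Linux-like system providing mmap(2), sysconf(3) and clock_gettime(2).

/*
 * Sketch: compare filling every 64-bit word of a fresh anonymous mapping
 * against touching only the first 64-bit word of each page.
 * Build with e.g.: cc -O2 touch-sketch.c -o touch-sketch
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>

#define MAP_BYTES	(256UL * 1024 * 1024)	/* 256 MB anonymous mapping */

static double now_sec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (double)ts.tv_sec + (double)ts.tv_nsec / 1e9;
}

/* heavy pass: write every 64-bit word in the mapping */
static void fill_all(uint8_t *buf, const size_t sz)
{
	uint64_t *ptr = (uint64_t *)buf, val = 0;
	const uint64_t *end = (uint64_t *)(buf + sz);

	while (ptr < end)
		*ptr++ = val++;
}

/* light pass: write only the first 64-bit word of each page */
static void touch_page_starts(uint8_t *buf, const size_t sz, const size_t page_size)
{
	uint64_t *ptr = (uint64_t *)buf, val = 0;
	const uint64_t *end = (uint64_t *)(buf + sz);
	const size_t ptr_inc = page_size / sizeof(*ptr);

	while (ptr < end) {
		*ptr = val++;
		ptr += ptr_inc;
	}
}

int main(void)
{
	const size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	uint8_t *buf;
	double t;

	/* heavy pass: every page faults in and is completely written */
	buf = mmap(NULL, MAP_BYTES, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return EXIT_FAILURE;
	t = now_sec();
	fill_all(buf, MAP_BYTES);
	printf("fill every word : %.3f s\n", now_sec() - t);
	(void)munmap(buf, MAP_BYTES);

	/* light pass: every page still faults in, but only one word is written */
	buf = mmap(NULL, MAP_BYTES, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return EXIT_FAILURE;
	t = now_sec();
	touch_page_starts(buf, MAP_BYTES, page_size);
	printf("touch page start: %.3f s\n", now_sec() - t);
	(void)munmap(buf, MAP_BYTES);

	return EXIT_SUCCESS;
}

Both passes fault in every page of the mapping, so the light variant mostly saves the store traffic for the rest of each page; that is consistent with the modest ~7.6% overall improvement quoted in the commit message rather than a dramatic one.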