| author | Ingo Molnar <mingo@kernel.org> | 2024-03-12 09:49:52 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2024-03-12 09:55:57 +0100 |
| commit | 2e2bc42c8381d2c0e9604b59e49264821da29368 (patch) | |
| tree | c158510b5e7942b3a0d6eb6807cbeacf96035798 /tools/testing/selftests/resctrl/fill_buf.c | |
| parent | x86/sev: Move early startup code into .head.text section (diff) | |
| parent | Merge tag 'x86_tdx_for_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/... (diff) | |
Merge branch 'linus' into x86/boot, to resolve conflict
There's a new conflict with Linus's upstream tree, because
in the following merge conflict resolution in <asm/coco.h>:

  38b334fc767e Merge tag 'x86_sev_for_v6.9_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Linus has resolved the conflicting placement of 'cc_mask' better
than the original commit:

  1c811d403afd x86/sev: Fix position dependent variable references in startup code

... which was also done by an internal merge resolution:

  2e5fc4786b7a Merge branch 'x86/sev' into x86/boot, to resolve conflicts and to pick up dependent tree

But Linus is right in 38b334fc767e: the 'cc_mask' declaration is sufficient
within the #ifdef CONFIG_ARCH_HAS_CC_PLATFORM block.

So instead of forcing Linus to do the same resolution again, merge in Linus's
tree and follow his conflict resolution.

Conflicts:
	arch/x86/include/asm/coco.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
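For reference, here is a schematic sketch of the resolution shape described above: the 'cc_mask' declaration appears only inside the #ifdef CONFIG_ARCH_HAS_CC_PLATFORM block of <asm/coco.h>, with no duplicate declaration outside the guard. This is an illustrative fragment, not the verbatim header; the type and enum stubs exist only so the sketch compiles standalone (in the kernel they come from other headers).

```c
/* Stand-in stubs; the kernel provides these in other headers. */
typedef unsigned long long u64;
enum cc_vendor { CC_VENDOR_NONE, CC_VENDOR_AMD, CC_VENDOR_INTEL };

#define CONFIG_ARCH_HAS_CC_PLATFORM 1

#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
/* The single 'cc_mask' declaration lives inside the guarded block... */
extern enum cc_vendor cc_vendor;
extern u64 cc_mask;
#else
/* ...and no duplicate declaration is needed out here. */
#endif
```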
Diffstat (limited to 'tools/testing/selftests/resctrl/fill_buf.c')
| -rw-r--r-- | tools/testing/selftests/resctrl/fill_buf.c | 132 |
1 file changed, 61 insertions(+), 71 deletions(-)
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
index 0d425f26583a..ae120f1735c0 100644
--- a/tools/testing/selftests/resctrl/fill_buf.c
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -38,7 +38,7 @@ static void cl_flush(void *p)
 #endif
 }
 
-static void mem_flush(unsigned char *buf, size_t buf_size)
+void mem_flush(unsigned char *buf, size_t buf_size)
 {
 	unsigned char *cp = buf;
 	size_t i = 0;
@@ -51,39 +51,38 @@ static void mem_flush(unsigned char *buf, size_t buf_size)
 	sb();
 }
 
-static void *malloc_and_init_memory(size_t buf_size)
-{
-	void *p = NULL;
-	uint64_t *p64;
-	size_t s64;
-	int ret;
-
-	ret = posix_memalign(&p, PAGE_SIZE, buf_size);
-	if (ret < 0)
-		return NULL;
-
-	p64 = (uint64_t *)p;
-	s64 = buf_size / sizeof(uint64_t);
-
-	while (s64 > 0) {
-		*p64 = (uint64_t)rand();
-		p64 += (CL_SIZE / sizeof(uint64_t));
-		s64 -= (CL_SIZE / sizeof(uint64_t));
-	}
-
-	return p;
-}
+/*
+ * Buffer index step advance to workaround HW prefetching interfering with
+ * the measurements.
+ *
+ * Must be a prime to step through all indexes of the buffer.
+ *
+ * Some primes work better than others on some architectures (from MBA/MBM
+ * result stability point of view).
+ */
+#define FILL_IDX_MULT	23
 
 static int fill_one_span_read(unsigned char *buf, size_t buf_size)
 {
-	unsigned char *end_ptr = buf + buf_size;
-	unsigned char sum, *p;
-
-	sum = 0;
-	p = buf;
-	while (p < end_ptr) {
-		sum += *p;
-		p += (CL_SIZE / 2);
+	unsigned int size = buf_size / (CL_SIZE / 2);
+	unsigned int i, idx = 0;
+	unsigned char sum = 0;
+
+	/*
+	 * Read the buffer in an order that is unexpected by HW prefetching
+	 * optimizations to prevent them interfering with the caching pattern.
+	 *
+	 * The read order is (in terms of halves of cachelines):
+	 *	i * FILL_IDX_MULT % size
+	 * The formula is open-coded below to avoid modulo inside the loop
+	 * as it improves MBA/MBM result stability on some architectures.
+	 */
+	for (i = 0; i < size; i++) {
+		sum += buf[idx * (CL_SIZE / 2)];
+
+		idx += FILL_IDX_MULT;
+		while (idx >= size)
+			idx -= size;
 	}
 
 	return sum;
@@ -101,10 +100,9 @@ static void fill_one_span_write(unsigned char *buf, size_t buf_size)
 	}
 }
 
-static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
+void fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
 {
 	int ret = 0;
-	FILE *fp;
 
 	while (1) {
 		ret = fill_one_span_read(buf, buf_size);
@@ -113,67 +111,59 @@ static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
 	}
 
 	/* Consume read result so that reading memory is not optimized out. */
-	fp = fopen("/dev/null", "w");
-	if (!fp) {
-		perror("Unable to write to /dev/null");
-		return -1;
-	}
-	fprintf(fp, "Sum: %d ", ret);
-	fclose(fp);
-
-	return 0;
+	*value_sink = ret;
 }
 
-static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
+static void fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
 {
 	while (1) {
 		fill_one_span_write(buf, buf_size);
 		if (once)
 			break;
 	}
-
-	return 0;
 }
 
-static int fill_cache(size_t buf_size, int memflush, int op, bool once)
+unsigned char *alloc_buffer(size_t buf_size, int memflush)
 {
-	unsigned char *buf;
+	void *buf = NULL;
+	uint64_t *p64;
+	size_t s64;
 	int ret;
 
-	buf = malloc_and_init_memory(buf_size);
-	if (!buf)
-		return -1;
-
-	/* Flush the memory before using to avoid "cache hot pages" effect */
-	if (memflush)
-		mem_flush(buf, buf_size);
-
-	if (op == 0)
-		ret = fill_cache_read(buf, buf_size, once);
-	else
-		ret = fill_cache_write(buf, buf_size, once);
+	ret = posix_memalign(&buf, PAGE_SIZE, buf_size);
+	if (ret < 0)
+		return NULL;
 
-	free(buf);
+	/* Initialize the buffer */
+	p64 = buf;
+	s64 = buf_size / sizeof(uint64_t);
 
-	if (ret) {
-		printf("\n Error in fill cache read/write...\n");
-		return -1;
+	while (s64 > 0) {
+		*p64 = (uint64_t)rand();
+		p64 += (CL_SIZE / sizeof(uint64_t));
+		s64 -= (CL_SIZE / sizeof(uint64_t));
 	}
 
+	/* Flush the memory before using to avoid "cache hot pages" effect */
+	if (memflush)
+		mem_flush(buf, buf_size);
 
-	return 0;
+	return buf;
 }
 
-int run_fill_buf(size_t span, int memflush, int op, bool once)
+int run_fill_buf(size_t buf_size, int memflush, int op, bool once)
 {
-	size_t cache_size = span;
-	int ret;
+	unsigned char *buf;
 
-	ret = fill_cache(cache_size, memflush, op, once);
-	if (ret) {
-		printf("\n Error in fill cache\n");
+	buf = alloc_buffer(buf_size, memflush);
+	if (!buf)
 		return -1;
-	}
+
+	if (op == 0)
+		fill_cache_read(buf, buf_size, once);
+	else
+		fill_cache_write(buf, buf_size, once);
 
 	free(buf);
 
 	return 0;
 }
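Two details of the new selftest code are worth calling out: the read loop walks the buffer with a prime stride, which visits every half-cacheline index exactly once whenever the stride is coprime with the index count, and the resulting sum is stored through 'value_sink' (a pointer the suite defines elsewhere) so the compiler cannot discard the reads. Below is a minimal standalone sketch of both ideas; FILL_IDX_MULT comes from the patch, while the index count of 1000, the visited[] array, and the local volatile sink are illustrative stand-ins.

```c
#include <stdio.h>
#include <stdlib.h>

#define FILL_IDX_MULT	23	/* same prime stride as the patch */

/* Volatile sink: keeps the summing loop from being optimized out. */
static volatile unsigned char value_sink;

int main(void)
{
	unsigned int size = 1000;	/* stand-in for buf_size / (CL_SIZE / 2) */
	unsigned char *visited = calloc(size, 1);
	unsigned int i, idx = 0;
	unsigned char sum = 0;

	if (!visited)
		return 1;

	/* Walk indexes as i * FILL_IDX_MULT % size, open-coded as in the patch. */
	for (i = 0; i < size; i++) {
		visited[idx] = 1;
		sum += (unsigned char)idx;	/* stand-in for buf[idx * (CL_SIZE / 2)] */

		idx += FILL_IDX_MULT;
		while (idx >= size)
			idx -= size;
	}
	value_sink = sum;

	/* 23 is coprime with 1000, so every index must have been visited. */
	for (i = 0; i < size; i++) {
		if (!visited[i]) {
			printf("index %u was skipped\n", i);
			free(visited);
			return 1;
		}
	}
	printf("all %u indexes visited exactly once\n", size);
	free(visited);
	return 0;
}
```

If the stride shared a factor with the index count, the walk would cycle through only a subset of indexes and leave the rest untouched, which is why the patch requires FILL_IDX_MULT to be prime.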
