File perf-bench-fix-futex-bench-to-correct-usage-of-affinity-for-machines-with-cpus-1k.patch of Package perf (project SUSE:SLE-15-SP4:Update)
From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Date: Wed, 6 Apr 2022 23:21:10 +0530
Subject: [PATCH] perf bench: Fix futex bench to correct usage of affinity for machines with #CPUs > 1K
Git-commit: c9c2a427dd9fe0e73eceacafb42d54f3c4535693
References: bsc#1208178
Signed-off-by: Tony Jones <tonyj@suse.de>
X-Info: IBM supplied backport.
X-Info: No 6d18804b963b7 (perf cpumap: Give CPUs their own type).
X-Info: Rework to use legacy "cpu->map" rather than perf_cpu_map

The 'perf bench futex' testcase fails on systems with more than 1K CPUs.

Testcase: perf bench futex all

Failure snippet:
<<>>Running futex/hash benchmark...

perf: pthread_create: No such file or directory
<<>>

All the futex benchmarks (ie hash, lock-api, requeue, wake,
wake-parallel), pthread_create is invoked in respective bench_futex_*
function. Though the logs shows direct failure from pthread_create,
strace logs showed that actual failure is from "sched_setaffinity"
returning EINVAL (invalid argument).

This happens because the default mask size in glibc is 1024. To
overcome this 1024 CPUs mask size limitation of cpu_set_t, change the
mask size using the CPU_*_S macros.

Patch addresses this by fixing all the futex benchmarks to use
CPU_ALLOC to allocate cpumask, CPU_ALLOC_SIZE for size, and CPU_SET_S
to set the mask.

Reported-by: Disha Goel <disgoel@linux.vnet.ibm.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Tested-by: Disha Goel <disgoel@linux.vnet.ibm.com>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nageswara R Sastry <rnsastry@linux.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: https://lore.kernel.org/r/20220406175113.87881-2-atrajeev@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 tools/perf/bench/futex-hash.c          | 26 +++++++++++++++++++-------
 tools/perf/bench/futex-lock-pi.c       | 21 ++++++++++++++++-----
 tools/perf/bench/futex-requeue.c       | 21 ++++++++++++++++-----
 tools/perf/bench/futex-wake-parallel.c | 21 ++++++++++++++++-----
 tools/perf/bench/futex-wake.c          | 22 ++++++++++++++++------
 5 files changed, 83 insertions(+), 28 deletions(-)

--- a/tools/perf/bench/futex-requeue.c	2023-04-05 11:44:17.650000000 +0530
+++ b/tools/perf/bench/futex-requeue.c	2023-04-05 11:45:05.020000000 +0530
@@ -86,22 +86,33 @@ static void *workerfn(void *arg __maybe_
 static void block_threads(pthread_t *w,
                           pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-        cpu_set_t cpuset;
+        cpu_set_t *cpuset;
         unsigned int i;
+        int nrcpus = perf_cpu_map__nr(cpu);
+        size_t size;
 
         threads_starting = nthreads;
 
+        cpuset = CPU_ALLOC(nrcpus);
+        BUG_ON(!cpuset);
+        size = CPU_ALLOC_SIZE(nrcpus);
+
         /* create and block all threads */
         for (i = 0; i < nthreads; i++) {
-                CPU_ZERO(&cpuset);
-                CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+                CPU_ZERO_S(size, cpuset);
+                CPU_SET_S(cpu->map[i % cpu->nr], size, cpuset);
 
-                if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+                if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                }
 
-                if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+                if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_create");
+                }
         }
+        CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
--- a/tools/perf/bench/futex-lock-pi.c	2023-04-05 11:44:17.650000000 +0530
+++ b/tools/perf/bench/futex-lock-pi.c	2023-04-05 11:45:05.020000000 +0530
@@ -117,11 +117,17 @@ static void *workerfn(void *arg)
 static void create_threads(struct worker *w, pthread_attr_t thread_attr,
                            struct perf_cpu_map *cpu)
 {
-        cpu_set_t cpuset;
+        cpu_set_t *cpuset;
         unsigned int i;
+        int nrcpus = perf_cpu_map__nr(cpu);
+        size_t size;
 
         threads_starting = nthreads;
 
+        cpuset = CPU_ALLOC(nrcpus);
+        BUG_ON(!cpuset);
+        size = CPU_ALLOC_SIZE(nrcpus);
+
         for (i = 0; i < nthreads; i++) {
                 worker[i].tid = i;
 
@@ -132,15 +138,20 @@ static void create_threads(struct worker
                 } else
                         worker[i].futex = &global_futex;
 
-                CPU_ZERO(&cpuset);
-                CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+                CPU_ZERO_S(size, cpuset);
+                CPU_SET_S(cpu->map[i % cpu->nr], size, cpuset);
 
-                if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+                if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                }
 
-                if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
+                if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_create");
+                }
         }
+        CPU_FREE(cpuset);
 }
 
 int bench_futex_lock_pi(int argc, const char **argv)
--- a/tools/perf/bench/futex-hash.c	2023-04-05 11:44:17.650000000 +0530
+++ b/tools/perf/bench/futex-hash.c	2023-04-05 11:45:05.020000000 +0530
@@ -119,12 +119,14 @@ static void print_summary(void)
 int bench_futex_hash(int argc, const char **argv)
 {
         int ret = 0;
-        cpu_set_t cpuset;
+        cpu_set_t *cpuset;
         struct sigaction act;
         unsigned int i;
         pthread_attr_t thread_attr;
         struct worker *worker = NULL;
         struct perf_cpu_map *cpu;
+        int nrcpus;
+        size_t size;
 
         argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
         if (argc) {
@@ -162,25 +164,35 @@ int bench_futex_hash(int argc, const cha
         threads_starting = nthreads;
         pthread_attr_init(&thread_attr);
         gettimeofday(&bench__start, NULL);
+
+        nrcpus = perf_cpu_map__nr(cpu);
+        cpuset = CPU_ALLOC(nrcpus);
+        BUG_ON(!cpuset);
+        size = CPU_ALLOC_SIZE(nrcpus);
+
         for (i = 0; i < nthreads; i++) {
                 worker[i].tid = i;
                 worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
                 if (!worker[i].futex)
                         goto errmem;
 
-                CPU_ZERO(&cpuset);
-                CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+                CPU_ZERO_S(size, cpuset);
 
-                ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-                if (ret)
+                CPU_SET_S(cpu->map[i % cpu->nr], size, cpuset);
+                ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+                if (ret) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
-
+                }
                 ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
                                      (void *)(struct worker *) &worker[i]);
-                if (ret)
+                if (ret) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_create");
+                }
 
         }
+        CPU_FREE(cpuset);
         pthread_attr_destroy(&thread_attr);
 
         pthread_mutex_lock(&thread_lock);
--- a/tools/perf/bench/futex-wake-parallel.c	2023-04-05 11:44:17.660000000 +0530
+++ b/tools/perf/bench/futex-wake-parallel.c	2023-04-05 11:45:05.020000000 +0530
@@ -140,22 +140,33 @@ static void *blocked_workerfn(void *arg
 static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
                           struct perf_cpu_map *cpu)
 {
-        cpu_set_t cpuset;
+        cpu_set_t *cpuset;
         unsigned int i;
+        int nrcpus = perf_cpu_map__nr(cpu);
+        size_t size;
 
         threads_starting = nblocked_threads;
 
+        cpuset = CPU_ALLOC(nrcpus);
+        BUG_ON(!cpuset);
+        size = CPU_ALLOC_SIZE(nrcpus);
+
         /* create and block all threads */
         for (i = 0; i < nblocked_threads; i++) {
-                CPU_ZERO(&cpuset);
-                CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+                CPU_ZERO_S(size, cpuset);
+                CPU_SET_S(cpu->map[i % cpu->nr], size, cpuset);
 
-                if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+                if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                }
 
-                if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
+                if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL)) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_create");
+                }
         }
+        CPU_FREE(cpuset);
 }
 
 static void print_run(struct thread_data *waking_worker, unsigned int run_num)
--- a/tools/perf/bench/futex-wake.c	2023-04-05 11:44:17.660000000 +0530
+++ b/tools/perf/bench/futex-wake.c	2023-04-05 11:45:05.020000000 +0530
@@ -92,22 +92,32 @@ static void print_summary(void)
 static void block_threads(pthread_t *w,
                           pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-        cpu_set_t cpuset;
+        cpu_set_t *cpuset;
         unsigned int i;
-
+        size_t size;
+        int nrcpus = perf_cpu_map__nr(cpu);
         threads_starting = nthreads;
 
+        cpuset = CPU_ALLOC(nrcpus);
+        BUG_ON(!cpuset);
+        size = CPU_ALLOC_SIZE(nrcpus);
+
         /* create and block all threads */
         for (i = 0; i < nthreads; i++) {
-                CPU_ZERO(&cpuset);
-                CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+                CPU_ZERO_S(size, cpuset);
+                CPU_SET_S(cpu->map[i % cpu->nr], size, cpuset);
 
-                if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+                if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                }
 
-                if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+                if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_create");
+                }
         }
+        CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
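
Note (not part of the patch): the glibc API the patch switches to can be exercised outside perf. The standalone sketch below is a minimal illustration of the same CPU_ALLOC/CPU_ALLOC_SIZE/CPU_*_S pattern; the use of sysconf(_SC_NPROCESSORS_CONF) and of CPU 0 are arbitrary choices for the example, not taken from the patch. The key point it demonstrates is passing the allocated mask size, rather than sizeof(cpu_set_t), to the affinity call, which is what avoids the EINVAL from sched_setaffinity() described in the commit message on machines with more than 1024 CPUs.

#define _GNU_SOURCE
#include <err.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        /* Size the mask for the real CPU count instead of glibc's fixed 1024. */
        int nrcpus = (int)sysconf(_SC_NPROCESSORS_CONF);
        cpu_set_t *cpuset = CPU_ALLOC(nrcpus);   /* heap-allocated mask */
        size_t size = CPU_ALLOC_SIZE(nrcpus);    /* bytes actually in use */

        if (!cpuset)
                err(EXIT_FAILURE, "CPU_ALLOC");

        CPU_ZERO_S(size, cpuset);
        CPU_SET_S(0, size, cpuset);              /* pin to CPU 0, illustrative only */

        /* Pass the allocated size, not sizeof(cpu_set_t), to the affinity call. */
        if (sched_setaffinity(0, size, cpuset)) {
                CPU_FREE(cpuset);
                err(EXIT_FAILURE, "sched_setaffinity");
        }

        printf("pinned to CPU 0 using a %zu-byte mask covering %d CPUs\n", size, nrcpus);
        CPU_FREE(cpuset);
        return 0;
}

Built with a plain gcc invocation, this runs on any Linux system; on a machine with more than 1024 CPUs the fixed-size CPU_SET()/sizeof(cpu_set_t) variant of the same code would fail in the way the commit message describes.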