author		Arnaldo Carvalho de Melo <acme@redhat.com>	2022-01-17 13:09:29 -0300
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2022-01-18 09:24:58 -0300
commit		49de179577e7b05b57f625bf05cdc60a72de38d0 (patch)
tree		21404b5c8dc4ba66b120a50c0121b9477f45907f
parent		1855b796f2f672cbb25400be2d3171c26fc869a3 (diff)
perf stat: No need to setup affinities when starting a workload
I.e. the simple:

  $ perf stat sleep 1

Uses a dummy CPU map and thus there is no need to setup/cleanup affinities
to avoid IPIs, etc.

With this we're down to a sched_getaffinity() call, in the libnuma
initialization, that probably can be removed in a followup patch.

Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220117160931.1191712-3-acme@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--	tools/perf/builtin-stat.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
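The pattern the patch follows can be summarized independently of perf's internals: save the thread's CPU affinity only when counters are actually opened per CPU, carry a possibly-NULL pointer through the open/close loops, and make the restore path a no-op for NULL (so this relies on affinity__cleanup() tolerating a NULL argument). Below is a minimal, self-contained sketch of that idea using the plain sched_getaffinity()/sched_setaffinity() calls; struct saved_affinity, affinity_save(), affinity_restore() and the per_cpu flag are illustrative stand-ins, not perf's affinity__setup()/affinity__cleanup() API shown in the diff that follows.

/*
 * Minimal sketch (not perf code): conditionally save/restore CPU affinity.
 * affinity_save()/affinity_restore() and the per_cpu flag are hypothetical
 * stand-ins for perf's affinity__setup()/affinity__cleanup() and the
 * !cpu_map__is_dummy() check in the diff below.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>

struct saved_affinity {
	cpu_set_t orig;		/* the mask to restore on exit */
};

static int affinity_save(struct saved_affinity *sa)
{
	/* Remember the calling thread's current CPU mask. */
	return sched_getaffinity(0, sizeof(sa->orig), &sa->orig);
}

static void affinity_restore(struct saved_affinity *sa)
{
	if (!sa)	/* NULL means affinity handling was skipped entirely */
		return;
	sched_setaffinity(0, sizeof(sa->orig), &sa->orig);
}

int main(int argc, char **argv)
{
	/*
	 * Pretend: extra arguments mean "per-CPU counters", i.e. a real,
	 * non-dummy CPU map; a bare run is the workload-only case.
	 */
	bool per_cpu = argc > 1;
	struct saved_affinity saved, *affinity = NULL;

	(void)argv;
	if (per_cpu) {			/* mirrors !cpu_map__is_dummy(...) */
		if (affinity_save(&saved) < 0)
			return 1;
		affinity = &saved;
	}

	/*
	 * ... open and enable counters here; with affinity == NULL no
	 * sched_setaffinity() calls (and the IPIs they trigger) happen ...
	 */

	affinity_restore(affinity);	/* no-op when affinity stayed NULL */
	return 0;
}

Run without arguments the sketch never touches affinity; run with any argument it saves and later restores the original mask. That conditional handling is exactly what the &affinity -> affinity change in the hunks below enables for the workload-only case.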
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 973ade18b72a..934e992c966f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -788,7 +788,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	const bool forks = (argc > 0);
 	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;
 	int err;
 	bool second_pass = false;
 
@@ -803,8 +803,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);
 
-	if (affinity__setup(&affinity) < 0)
-		return -1;
+	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return -1;
+		affinity = &saved_affinity;
+	}
 
 	evlist__for_each_entry(evsel_list, counter) {
 		if (bpf_counter__load(counter, &target))
@@ -813,7 +816,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			all_counters_use_bpf = false;
 	}
 
-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;
 
 		/*
@@ -869,7 +872,7 @@ try_again:
 		 */
 
 		/* First close errored or weak retry */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;
 
 			if (!counter->reset_group && !counter->errored)
@@ -878,7 +881,7 @@ try_again:
 			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
 		}
 		/* Now reopen weak */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;
 
 			if (!counter->reset_group && !counter->errored)
@@ -904,7 +907,7 @@ try_again_reset:
 			counter->supported = true;
 		}
 	}
-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);
 
 	evlist__for_each_entry(evsel_list, counter) {
 		if (!counter->supported) {