author     Yicong Yang <yangyicong@hisilicon.com>    2024-02-08 10:40:26 +0800
committer  Namhyung Kim <namhyung@kernel.org>        2024-02-09 14:59:53 -0800
commit     cbc917a1b03bce85f385c1e640c9dcb02ffb9ab0 (patch)
tree       85bf911a310bfd4c9e3804d1959afc8441ac3a8f /tools/perf/util/stat-display.c
parent     perf tools: Remove misleading comments on map functions (diff)
perf stat: Support per-cluster aggregation
Some platforms have a 'cluster' topology, and CPUs in a cluster share resources
like the L3 Cache Tag (for HiSilicon Kunpeng SoC) or the L2 cache (for Intel
Jacobsville). Parsing and building the cluster topology has been supported
since [1].

perf stat already supports aggregation for other topologies like die or
socket. It is useful to aggregate per-cluster as well, to find problems like
L3T bandwidth contention.

This patch adds a "--per-cluster" option for per-cluster aggregation, and
updates the docs and the related test. The output will be like:

[root@localhost tmp]# perf stat -a -e LLC-load --per-cluster -- sleep 5

 Performance counter stats for 'system wide':

S56-D0-CLS158         4      1,321,521,570      LLC-load
S56-D0-CLS594         4        794,211,453      LLC-load
S56-D0-CLS1030        4             41,623      LLC-load
S56-D0-CLS1466        4             41,646      LLC-load
S56-D0-CLS1902        4             16,863      LLC-load
S56-D0-CLS2338        4             15,721      LLC-load
S56-D0-CLS2774        4             22,671      LLC-load
[...]

On a legacy system without clusters or cluster topology support, the output
will look like:

[root@localhost perf]# perf stat -a -e cycles --per-cluster -- sleep 1

 Performance counter stats for 'system wide':

S56-D0-CLS0          64         18,011,485      cycles
S7182-D0-CLS0        64         16,548,835      cycles

Note that this patch doesn't mix the cluster information into the outputs of
--per-core, to avoid breaking any tools/scripts using them.

Note that perf recently gained "--per-cache" aggregation, but that is not the
same as per-cluster aggregation, although cluster CPUs may share some cache
resources. For example, on my machine all clusters within a die share the
same L3 cache:

$ cat /sys/devices/system/cpu/cpu0/cache/index3/shared_cpu_list
0-31
$ cat /sys/devices/system/cpu/cpu0/topology/cluster_cpus_list
0-3

[1] commit c5e22feffdd7 ("topology: Represent clusters of CPUs within a die")

Tested-by: Jie Zhan <zhanjie9@hisilicon.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Cc: james.clark@arm.com
Cc: 21cnbao@gmail.com
Cc: prime.zeng@hisilicon.com
Cc: Jonathan.Cameron@huawei.com
Cc: fanghao11@huawei.com
Cc: linuxarm@huawei.com
Cc: tim.c.chen@intel.com
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240208024026.2691-1-yangyicong@huawei.com
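[Editor's illustration] The cluster IDs shown above come from the CPU topology
that the kernel exports via sysfs (added by [1]). Below is a minimal,
hedged sketch of reading that attribute directly; it assumes the standard
/sys/devices/system/cpu/cpuN/topology/cluster_id file is present, and it is
not the code perf itself uses (perf goes through its own cpumap/topology
helpers).

#include <stdio.h>

/*
 * Sketch: read the cluster id of a given CPU from sysfs.
 * Assumes the topology/cluster_id attribute introduced with [1] exists;
 * returns -1 when it is missing, which corresponds to the "legacy system"
 * case described in the commit message.
 */
static int read_cluster_id(int cpu)
{
	char path[128];
	FILE *fp;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/cluster_id", cpu);
	fp = fopen(path, "r");
	if (!fp)
		return -1;	/* no cluster topology exposed */
	if (fscanf(fp, "%d", &id) != 1)
		id = -1;
	fclose(fp);
	return id;
}

int main(void)
{
	printf("cpu0 cluster id: %d\n", read_cluster_id(0));
	return 0;
}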
Diffstat (limited to 'tools/perf/util/stat-display.c')
-rw-r--r--   tools/perf/util/stat-display.c   13
1 file changed, 13 insertions, 0 deletions
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 8c61f8627ebc..4dfe7d9517a9 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -201,6 +201,9 @@ static void print_aggr_id_std(struct perf_stat_config *config,
snprintf(buf, sizeof(buf), "S%d-D%d-L%d-ID%d",
id.socket, id.die, id.cache_lvl, id.cache);
break;
+ case AGGR_CLUSTER:
+ snprintf(buf, sizeof(buf), "S%d-D%d-CLS%d", id.socket, id.die, id.cluster);
+ break;
case AGGR_DIE:
snprintf(buf, sizeof(buf), "S%d-D%d", id.socket, id.die);
break;
@@ -251,6 +254,10 @@ static void print_aggr_id_csv(struct perf_stat_config *config,
fprintf(config->output, "S%d-D%d-L%d-ID%d%s%d%s",
id.socket, id.die, id.cache_lvl, id.cache, sep, aggr_nr, sep);
break;
+ case AGGR_CLUSTER:
+ fprintf(config->output, "S%d-D%d-CLS%d%s%d%s",
+ id.socket, id.die, id.cluster, sep, aggr_nr, sep);
+ break;
case AGGR_DIE:
fprintf(output, "S%d-D%d%s%d%s",
id.socket, id.die, sep, aggr_nr, sep);
@@ -300,6 +307,10 @@ static void print_aggr_id_json(struct perf_stat_config *config,
fprintf(output, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d, ",
id.socket, id.die, id.cache_lvl, id.cache, aggr_nr);
break;
+ case AGGR_CLUSTER:
+ fprintf(output, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d, ",
+ id.socket, id.die, id.cluster, aggr_nr);
+ break;
case AGGR_DIE:
fprintf(output, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d, ",
id.socket, id.die, aggr_nr);
@@ -1248,6 +1259,7 @@ static void print_header_interval_std(struct perf_stat_config *config,
case AGGR_NODE:
case AGGR_SOCKET:
case AGGR_DIE:
+ case AGGR_CLUSTER:
case AGGR_CACHE:
case AGGR_CORE:
fprintf(output, "#%*s %-*s cpus",
@@ -1550,6 +1562,7 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
switch (config->aggr_mode) {
case AGGR_CORE:
case AGGR_CACHE:
+ case AGGR_CLUSTER:
case AGGR_DIE:
case AGGR_SOCKET:
case AGGR_NODE:
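[Editor's illustration] As a quick sanity check of the three new AGGR_CLUSTER
cases above, here is a small standalone sketch that reuses the same format
strings. The socket/die/cluster/count values are borrowed from the example in
the commit message, and the CSV separator is assumed to be the default ','.

#include <stdio.h>

/*
 * Sketch of the std, CSV and JSON renderings of a cluster aggregate ID,
 * using the format strings added by this patch. Values are illustrative.
 */
int main(void)
{
	int socket = 56, die = 0, cluster = 158, aggr_nr = 4;
	const char *sep = ",";	/* assumed default CSV separator */

	/* std:  S56-D0-CLS158 */
	printf("S%d-D%d-CLS%d\n", socket, die, cluster);

	/* CSV:  S56-D0-CLS158,4, */
	printf("S%d-D%d-CLS%d%s%d%s\n",
	       socket, die, cluster, sep, aggr_nr, sep);

	/* JSON: "cluster" : "S56-D0-CLS158", "aggregate-number" : 4, */
	printf("\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d,\n",
	       socket, die, cluster, aggr_nr);

	return 0;
}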