Re: [PATCH v1 12/40] perf test: Roundtrip name, don't assume 1 event per name

From: Liang, Kan
Date: Thu Apr 27 2023 - 15:44:46 EST




On 2023-04-26 3:00 a.m., Ian Rogers wrote:
> Opening hardware names and a legacy cache event on a hybrid PMU opens
> it on each PMU. Parsing and checking indexes fails, as the parsed
> index is double the expected. Avoid checking the index by just
> comparing the names immediately after the parse.
>
> This change removes hard coded hybrid logic and removes assumptions
> about the expansion of an event. On hybrid the PMUs may or may not
> support an event and so using a distance isn't a consistent solution.
>
> Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>

I ran the test on Cascade Lake and Alder Lake. It looks good.

Tested-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>

Thanks,
Kan
> ---
> tools/perf/tests/evsel-roundtrip-name.c | 119 ++++++++++--------------
> 1 file changed, 49 insertions(+), 70 deletions(-)
>
> diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
> index e94fed901992..15ff86f9da0b 100644
> --- a/tools/perf/tests/evsel-roundtrip-name.c
> +++ b/tools/perf/tests/evsel-roundtrip-name.c
> @@ -4,114 +4,93 @@
> #include "parse-events.h"
> #include "tests.h"
> #include "debug.h"
> -#include "pmu.h"
> -#include "pmu-hybrid.h"
> -#include <errno.h>
> #include <linux/kernel.h>
>
> static int perf_evsel__roundtrip_cache_name_test(void)
> {
> - char name[128];
> - int type, op, err = 0, ret = 0, i, idx;
> - struct evsel *evsel;
> - struct evlist *evlist = evlist__new();
> + int ret = TEST_OK;
>
> - if (evlist == NULL)
> - return -ENOMEM;
> -
> - for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
> - for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
> + for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
> + for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
> /* skip invalid cache type */
> if (!evsel__is_cache_op_valid(type, op))
> continue;
>
> - for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
> - __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
> - err = parse_event(evlist, name);
> - if (err)
> - ret = err;
> - }
> - }
> - }
> -
> - idx = 0;
> - evsel = evlist__first(evlist);
> + for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) {
> + char name[128];
> + struct evlist *evlist = evlist__new();
> + struct evsel *evsel;
> + int err;
>
> - for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
> - for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
> - /* skip invalid cache type */
> - if (!evsel__is_cache_op_valid(type, op))
> - continue;
> + if (evlist == NULL) {
> + pr_debug("Failed to alloc evlist");
> + return TEST_FAIL;
> + }
> + __evsel__hw_cache_type_op_res_name(type, op, res,
> + name, sizeof(name));
>
> - for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
> - __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
> - if (evsel->core.idx != idx)
> + err = parse_event(evlist, name);
> + if (err) {
> + pr_debug("Failure to parse cache event '%s' possibly as PMUs don't support it",
> + name);
> + evlist__delete(evlist);
> continue;
> -
> - ++idx;
> -
> - if (strcmp(evsel__name(evsel), name)) {
> - pr_debug("%s != %s\n", evsel__name(evsel), name);
> - ret = -1;
> }
> -
> - evsel = evsel__next(evsel);
> + evlist__for_each_entry(evlist, evsel) {
> + if (strcmp(evsel__name(evsel), name)) {
> + pr_debug("%s != %s\n", evsel__name(evsel), name);
> + ret = TEST_FAIL;
> + }
> + }
> + evlist__delete(evlist);
> }
> }
> }
> -
> - evlist__delete(evlist);
> return ret;
> }
>
> -static int __perf_evsel__name_array_test(const char *const names[], int nr_names,
> - int distance)
> +static int perf_evsel__name_array_test(const char *const names[], int nr_names)
> {
> - int i, err;
> - struct evsel *evsel;
> - struct evlist *evlist = evlist__new();
> + int ret = TEST_OK;
>
> - if (evlist == NULL)
> - return -ENOMEM;
> + for (int i = 0; i < nr_names; ++i) {
> + struct evlist *evlist = evlist__new();
> + struct evsel *evsel;
> + int err;
>
> - for (i = 0; i < nr_names; ++i) {
> + if (evlist == NULL) {
> + pr_debug("Failed to alloc evlist");
> + return TEST_FAIL;
> + }
> err = parse_event(evlist, names[i]);
> if (err) {
> pr_debug("failed to parse event '%s', err %d\n",
> names[i], err);
> - goto out_delete_evlist;
> + evlist__delete(evlist);
> + ret = TEST_FAIL;
> + continue;
> }
> - }
> -
> - err = 0;
> - evlist__for_each_entry(evlist, evsel) {
> - if (strcmp(evsel__name(evsel), names[evsel->core.idx / distance])) {
> - --err;
> - pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->core.idx / distance]);
> + evlist__for_each_entry(evlist, evsel) {
> + if (strcmp(evsel__name(evsel), names[i])) {
> + pr_debug("%s != %s\n", evsel__name(evsel), names[i]);
> + ret = TEST_FAIL;
> + }
> }
> + evlist__delete(evlist);
> }
> -
> -out_delete_evlist:
> - evlist__delete(evlist);
> - return err;
> + return ret;
> }
>
> -#define perf_evsel__name_array_test(names, distance) \
> - __perf_evsel__name_array_test(names, ARRAY_SIZE(names), distance)
> -
> static int test__perf_evsel__roundtrip_name_test(struct test_suite *test __maybe_unused,
> int subtest __maybe_unused)
> {
> - int err = 0, ret = 0;
> -
> - if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom"))
> - return perf_evsel__name_array_test(evsel__hw_names, 2);
> + int err = 0, ret = TEST_OK;
>
> - err = perf_evsel__name_array_test(evsel__hw_names, 1);
> + err = perf_evsel__name_array_test(evsel__hw_names, PERF_COUNT_HW_MAX);
> if (err)
> ret = err;
>
> - err = __perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1, 1);
> + err = perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1);
> if (err)
> ret = err;
>