1 // SPDX-License-Identifier: GPL-2.0
2 #include <stdbool.h>
3 #include <assert.h>
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include "metricgroup.h"
8 #include "cpumap.h"
9 #include "cputopo.h"
10 #include "debug.h"
11 #include "evlist.h"
12 #include "expr.h"
13 #include <util/expr-bison.h>
14 #include <util/expr-flex.h>
15 #include "util/hashmap.h"
16 #include "util/header.h"
17 #include "util/pmu.h"
18 #include "smt.h"
19 #include "tsc.h"
20 #include <api/fs/fs.h>
21 #include <linux/err.h>
22 #include <linux/kernel.h>
23 #include <linux/zalloc.h>
24 #include <ctype.h>
25 #include <math.h>
26 #include "pmu.h"
27
/*
 * Value associated with an identifier in an expression parse context.
 * The union is discriminated by 'kind' below: plain double values use
 * 'val', metric references use 'ref'.
 */
struct expr_id_data {
	union {
		struct {
			double val;
			/*
			 * Count of sources aggregated into 'val'.
			 * NOTE(review): inferred from
			 * expr__add_id_val_source_count() — confirm.
			 */
			int source_count;
		} val;
		struct {
			/* Evaluated value, valid once kind == EXPR_ID_DATA__REF_VALUE. */
			double val;
			/* Borrowed from 'struct pmu_event'; not owned (see expr__add_ref()). */
			const char *metric_name;
			/* Borrowed expression text; evaluated lazily in expr__resolve_id(). */
			const char *metric_expr;
		} ref;
	};

	enum {
		/* Holding a double value. */
		EXPR_ID_DATA__VALUE,
		/* Reference to another metric. */
		EXPR_ID_DATA__REF,
		/* A reference but the value has been computed. */
		EXPR_ID_DATA__REF_VALUE,
	} kind;
};
50
/* Classic 31-multiplier string hash over the NUL-terminated key. */
static size_t key_hash(long key, void *ctx __maybe_unused)
{
	size_t h = 0;

	for (const char *p = (const char *)key; *p != '\0'; p++)
		h = h * 31 + *p;

	return h;
}
63
/* Hashmap key comparator: keys are NUL-terminated strings. */
static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return strcmp((const char *)key1, (const char *)key2) == 0;
}
68
ids__new(void)69 struct hashmap *ids__new(void)
70 {
71 struct hashmap *hash;
72
73 hash = hashmap__new(key_hash, key_equal, NULL);
74 if (IS_ERR(hash))
75 return NULL;
76 return hash;
77 }
78
ids__free(struct hashmap * ids)79 void ids__free(struct hashmap *ids)
80 {
81 struct hashmap_entry *cur;
82 size_t bkt;
83
84 if (ids == NULL)
85 return;
86
87 hashmap__for_each_entry(ids, cur, bkt) {
88 zfree(&cur->pkey);
89 zfree(&cur->pvalue);
90 }
91
92 hashmap__free(ids);
93 }
94
/*
 * Record 'id' in the table with a NULL value — only membership matters
 * here.  On success the map takes ownership of 'id'; any displaced
 * previous key/value pair is freed.  Returns 0 or a negative errno.
 * NOTE(review): on failure 'id' is not freed here — presumably the caller
 * retains ownership; confirm against callers.
 */
int ids__insert(struct hashmap *ids, const char *id)
{
	struct expr_id_data *old_data = NULL;
	char *old_key = NULL;
	int ret;

	/*
	 * The value pointer is deliberately NULL.  The old code also kept a
	 * NULL 'data_ptr' around and called free(data_ptr) on failure — dead
	 * code, removed.
	 */
	ret = hashmap__set(ids, id, /*value=*/NULL, &old_key, &old_data);
	free(old_key);
	free(old_data);
	return ret;
}
108
/*
 * Merge two id tables, returning the surviving map or NULL on failure.
 * Ownership of both inputs is consumed: entries are moved from the smaller
 * map into the larger one, the donor map is freed, and the survivor is
 * returned.  Either input may be NULL, in which case the other is returned
 * unchanged.
 */
struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2)
{
	size_t bkt;
	struct hashmap_entry *cur;
	int ret;
	struct expr_id_data *old_data = NULL;
	char *old_key = NULL;

	if (!ids1)
		return ids2;

	if (!ids2)
		return ids1;

	/* Always move the fewer entries: swap so ids2 is the smaller map. */
	if (hashmap__size(ids1) < hashmap__size(ids2)) {
		struct hashmap *tmp = ids1;

		ids1 = ids2;
		ids2 = tmp;
	}
	hashmap__for_each_entry(ids2, cur, bkt) {
		/*
		 * Key/value pointers are transferred into ids1; duplicates
		 * displaced from ids1 are freed here.
		 */
		ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data);
		free(old_key);
		free(old_data);

		if (ret) {
			/*
			 * NOTE(review): hashmap__free() does not free entry
			 * keys/values, so the strings still held by either map
			 * leak on this path — verify this is an accepted
			 * trade-off on the OOM path.
			 */
			hashmap__free(ids1);
			hashmap__free(ids2);
			return NULL;
		}
	}
	/* All entries now live in ids1; only the donor's table is freed. */
	hashmap__free(ids2);
	return ids1;
}
143
144 /* Caller must make sure id is allocated */
expr__add_id(struct expr_parse_ctx * ctx,const char * id)145 int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
146 {
147 return ids__insert(ctx->ids, id);
148 }
149
/* Caller must make sure id is allocated */
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
{
	/* A plain value counts as one source by definition. */
	return expr__add_id_val_source_count(ctx, id, val, /*source_count=*/1);
}
155
156 /* Caller must make sure id is allocated */
expr__add_id_val_source_count(struct expr_parse_ctx * ctx,const char * id,double val,int source_count)157 int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
158 double val, int source_count)
159 {
160 struct expr_id_data *data_ptr = NULL, *old_data = NULL;
161 char *old_key = NULL;
162 int ret;
163
164 data_ptr = malloc(sizeof(*data_ptr));
165 if (!data_ptr)
166 return -ENOMEM;
167 data_ptr->val.val = val;
168 data_ptr->val.source_count = source_count;
169 data_ptr->kind = EXPR_ID_DATA__VALUE;
170
171 ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
172 if (ret)
173 free(data_ptr);
174 free(old_key);
175 free(old_data);
176 return ret;
177 }
178
expr__add_ref(struct expr_parse_ctx * ctx,struct metric_ref * ref)179 int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
180 {
181 struct expr_id_data *data_ptr = NULL, *old_data = NULL;
182 char *old_key = NULL;
183 char *name;
184 int ret;
185
186 data_ptr = zalloc(sizeof(*data_ptr));
187 if (!data_ptr)
188 return -ENOMEM;
189
190 name = strdup(ref->metric_name);
191 if (!name) {
192 free(data_ptr);
193 return -ENOMEM;
194 }
195
196 /*
197 * Intentionally passing just const char pointers,
198 * originally from 'struct pmu_event' object.
199 * We don't need to change them, so there's no
200 * need to create our own copy.
201 */
202 data_ptr->ref.metric_name = ref->metric_name;
203 data_ptr->ref.metric_expr = ref->metric_expr;
204 data_ptr->kind = EXPR_ID_DATA__REF;
205
206 ret = hashmap__set(ctx->ids, name, data_ptr, &old_key, &old_data);
207 if (ret)
208 free(data_ptr);
209
210 pr_debug2("adding ref metric %s: %s\n",
211 ref->metric_name, ref->metric_expr);
212
213 free(old_key);
214 free(old_data);
215 return ret;
216 }
217
expr__get_id(struct expr_parse_ctx * ctx,const char * id,struct expr_id_data ** data)218 int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
219 struct expr_id_data **data)
220 {
221 return hashmap__find(ctx->ids, id, data) ? 0 : -1;
222 }
223
expr__subset_of_ids(struct expr_parse_ctx * haystack,struct expr_parse_ctx * needles)224 bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
225 struct expr_parse_ctx *needles)
226 {
227 struct hashmap_entry *cur;
228 size_t bkt;
229 struct expr_id_data *data;
230
231 hashmap__for_each_entry(needles->ids, cur, bkt) {
232 if (expr__get_id(haystack, cur->pkey, &data))
233 return false;
234 }
235 return true;
236 }
237
238
/*
 * Resolve 'id' to its data, evaluating metric references on first use.
 * Returns 0 with *datap set, or -1 if the id is unknown or its referenced
 * expression fails to evaluate.
 */
int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
		     struct expr_id_data **datap)
{
	struct expr_id_data *data;

	/* ids__insert() stores NULL values, hence the !*datap check. */
	if (expr__get_id(ctx, id, datap) || !*datap) {
		pr_debug("%s not found\n", id);
		return -1;
	}

	data = *datap;

	switch (data->kind) {
	case EXPR_ID_DATA__VALUE:
		pr_debug2("lookup(%s): val %f\n", id, data->val.val);
		break;
	case EXPR_ID_DATA__REF:
		pr_debug2("lookup(%s): ref metric name %s\n", id,
			data->ref.metric_name);
		pr_debug("processing metric: %s ENTRY\n", id);
		/*
		 * Flip the kind BEFORE evaluating so the recursive
		 * expr__parse() writes the result into ref.val.
		 * NOTE(review): a cyclic metric reference would re-enter here
		 * and observe REF_VALUE with a not-yet-computed val — TODO
		 * confirm cycles are rejected by the metric loader.
		 */
		data->kind = EXPR_ID_DATA__REF_VALUE;
		if (expr__parse(&data->ref.val, ctx, data->ref.metric_expr)) {
			pr_debug("%s failed to count\n", id);
			return -1;
		}
		pr_debug("processing metric: %s EXIT: %f\n", id, data->ref.val);
		break;
	case EXPR_ID_DATA__REF_VALUE:
		/* Already evaluated on an earlier lookup; nothing to do. */
		pr_debug2("lookup(%s): ref val %f metric name %s\n", id,
			data->ref.val, data->ref.metric_name);
		break;
	default:
		assert(0); /* Unreachable. */
	}

	return 0;
}
276
expr__del_id(struct expr_parse_ctx * ctx,const char * id)277 void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
278 {
279 struct expr_id_data *old_val = NULL;
280 char *old_key = NULL;
281
282 hashmap__delete(ctx->ids, id, &old_key, &old_val);
283 free(old_key);
284 free(old_val);
285 }
286
expr__ctx_new(void)287 struct expr_parse_ctx *expr__ctx_new(void)
288 {
289 struct expr_parse_ctx *ctx;
290
291 ctx = calloc(1, sizeof(struct expr_parse_ctx));
292 if (!ctx)
293 return NULL;
294
295 ctx->ids = hashmap__new(key_hash, key_equal, NULL);
296 if (IS_ERR(ctx->ids)) {
297 free(ctx);
298 return NULL;
299 }
300
301 return ctx;
302 }
303
expr__ctx_clear(struct expr_parse_ctx * ctx)304 void expr__ctx_clear(struct expr_parse_ctx *ctx)
305 {
306 struct hashmap_entry *cur;
307 size_t bkt;
308
309 hashmap__for_each_entry(ctx->ids, cur, bkt) {
310 zfree(&cur->pkey);
311 zfree(&cur->pvalue);
312 }
313 hashmap__clear(ctx->ids);
314 }
315
expr__ctx_free(struct expr_parse_ctx * ctx)316 void expr__ctx_free(struct expr_parse_ctx *ctx)
317 {
318 struct hashmap_entry *cur;
319 size_t bkt;
320
321 if (!ctx)
322 return;
323
324 zfree(&ctx->sctx.user_requested_cpu_list);
325 hashmap__for_each_entry(ctx->ids, cur, bkt) {
326 zfree(&cur->pkey);
327 zfree(&cur->pvalue);
328 }
329 hashmap__free(ctx->ids);
330 free(ctx);
331 }
332
/*
 * Run the flex/bison expression parser over 'expr'.  With compute_ids set,
 * the grammar collects identifiers into ctx->ids instead of evaluating;
 * otherwise the numeric result is stored in *val.  Returns the parser's
 * status (0 on success).
 */
static int
__expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
	      bool compute_ids)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	pr_debug2("parsing metric: %s\n", expr);

	ret = expr_lex_init_extra(&ctx->sctx, &scanner);
	if (ret)
		return ret;

	/* Scanner setup, parse and teardown must happen in exactly this order. */
	buffer = expr__scan_string(expr, scanner);

#ifdef PARSER_DEBUG
	expr_debug = 1;
	expr_set_debug(1, scanner);
#endif

	ret = expr_parse(val, ctx, compute_ids, scanner);

	expr__flush_buffer(buffer, scanner);
	expr__delete_buffer(buffer, scanner);
	expr_lex_destroy(scanner);
	return ret;
}
361
expr__parse(double * final_val,struct expr_parse_ctx * ctx,const char * expr)362 int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
363 const char *expr)
364 {
365 return __expr__parse(final_val, ctx, expr, /*compute_ids=*/false) ? -1 : 0;
366 }
367
expr__find_ids(const char * expr,const char * one,struct expr_parse_ctx * ctx)368 int expr__find_ids(const char *expr, const char *one,
369 struct expr_parse_ctx *ctx)
370 {
371 int ret = __expr__parse(NULL, ctx, expr, /*compute_ids=*/true);
372
373 if (one)
374 expr__del_id(ctx, one);
375
376 return ret;
377 }
378
expr_id_data__value(const struct expr_id_data * data)379 double expr_id_data__value(const struct expr_id_data *data)
380 {
381 if (data->kind == EXPR_ID_DATA__VALUE)
382 return data->val.val;
383 assert(data->kind == EXPR_ID_DATA__REF_VALUE);
384 return data->ref.val;
385 }
386
expr_id_data__source_count(const struct expr_id_data * data)387 double expr_id_data__source_count(const struct expr_id_data *data)
388 {
389 assert(data->kind == EXPR_ID_DATA__VALUE);
390 return data->val.source_count;
391 }
392
#if !defined(__i386__) && !defined(__x86_64__)
/* Weak fallback for non-x86: the TSC frequency literal is unavailable. */
double arch_get_tsc_freq(void)
{
	return 0.0;
}
#endif
399
has_pmem(void)400 static double has_pmem(void)
401 {
402 static bool has_pmem, cached;
403 const char *sysfs = sysfs__mountpoint();
404 char path[PATH_MAX];
405
406 if (!cached) {
407 snprintf(path, sizeof(path), "%s/firmware/acpi/tables/NFIT", sysfs);
408 has_pmem = access(path, F_OK) == 0;
409 cached = true;
410 }
411 return has_pmem ? 1.0 : 0.0;
412 }
413
expr__get_literal(const char * literal,const struct expr_scanner_ctx * ctx)414 double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx)
415 {
416 const struct cpu_topology *topology;
417 double result = NAN;
418
419 if (!strcmp("#num_cpus", literal)) {
420 result = cpu__max_present_cpu().cpu;
421 goto out;
422 }
423 if (!strcmp("#num_cpus_online", literal)) {
424 struct perf_cpu_map *online = cpu_map__online();
425
426 if (online)
427 result = perf_cpu_map__nr(online);
428 goto out;
429 }
430
431 if (!strcasecmp("#system_tsc_freq", literal)) {
432 result = arch_get_tsc_freq();
433 goto out;
434 }
435
436 /*
437 * Assume that topology strings are consistent, such as CPUs "0-1"
438 * wouldn't be listed as "0,1", and so after deduplication the number of
439 * these strings gives an indication of the number of packages, dies,
440 * etc.
441 */
442 if (!strcasecmp("#smt_on", literal)) {
443 result = smt_on() ? 1.0 : 0.0;
444 goto out;
445 }
446 if (!strcmp("#core_wide", literal)) {
447 result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list)
448 ? 1.0 : 0.0;
449 goto out;
450 }
451 if (!strcmp("#num_packages", literal)) {
452 topology = online_topology();
453 result = topology->package_cpus_lists;
454 goto out;
455 }
456 if (!strcmp("#num_dies", literal)) {
457 topology = online_topology();
458 result = topology->die_cpus_lists;
459 goto out;
460 }
461 if (!strcmp("#num_cores", literal)) {
462 topology = online_topology();
463 result = topology->core_cpus_lists;
464 goto out;
465 }
466 if (!strcmp("#slots", literal)) {
467 result = perf_pmu__cpu_slots_per_cycle();
468 goto out;
469 }
470 if (!strcmp("#has_pmem", literal)) {
471 result = has_pmem();
472 goto out;
473 }
474
475 pr_err("Unrecognized literal '%s'", literal);
476 out:
477 pr_debug2("literal: %s = %f\n", literal, result);
478 return result;
479 }
480
481 /* Does the event 'id' parse? Determine via ctx->ids if possible. */
expr__has_event(const struct expr_parse_ctx * ctx,bool compute_ids,const char * id)482 double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id)
483 {
484 struct evlist *tmp;
485 double ret;
486
487 if (hashmap__find(ctx->ids, id, /*value=*/NULL))
488 return 1.0;
489
490 if (!compute_ids)
491 return 0.0;
492
493 tmp = evlist__new();
494 if (!tmp)
495 return NAN;
496
497 if (strchr(id, '@')) {
498 char *tmp_id, *p;
499
500 tmp_id = strdup(id);
501 if (!tmp_id) {
502 ret = NAN;
503 goto out;
504 }
505 p = strchr(tmp_id, '@');
506 *p = '/';
507 p = strrchr(tmp_id, '@');
508 *p = '/';
509 ret = parse_event(tmp, tmp_id) ? 0 : 1;
510 free(tmp_id);
511 } else {
512 ret = parse_event(tmp, id) ? 0 : 1;
513 }
514 out:
515 evlist__delete(tmp);
516 return ret;
517 }
518
/* 1.0 when 'test_id' matches this machine's cpuid, 0.0 otherwise, NAN on error. */
double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
			      bool compute_ids __maybe_unused, const char *test_id)
{
	struct perf_pmu *pmu = perf_pmus__find_core_pmu();
	char *cpuid = perf_pmu__getcpuid(pmu);
	double ret = NAN;

	if (cpuid) {
		/* strcmp_cpuid_str() returns 0 on match. */
		ret = strcmp_cpuid_str(test_id, cpuid) == 0 ? 1.0 : 0.0;
		free(cpuid);
	}
	return ret;
}
534