// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/param.h>
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "parse-events-hybrid.h"
#include "debug.h"
#include "pmu.h"
#include "pmu-hybrid.h"
#include "perf.h"

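/*
 * Set up attr->type and fold the hybrid PMU type ID into the high bits
 * of attr->config for hardware and hardware cache events.
 */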
static void config_hybrid_attr(struct perf_event_attr *attr,
                               int type, int pmu_type)
{
        /*
         * attr.config layout for type PERF_TYPE_HARDWARE and
         * PERF_TYPE_HW_CACHE
         *
         * PERF_TYPE_HARDWARE:                 0xEEEEEEEE000000AA
         *                                     AA: hardware event ID
         *                                     EEEEEEEE: PMU type ID
         * PERF_TYPE_HW_CACHE:                 0xEEEEEEEE00DDCCBB
         *                                     BB: hardware cache ID
         *                                     CC: hardware cache op ID
         *                                     DD: hardware cache op result ID
         *                                     EEEEEEEE: PMU type ID
         * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
         */
        attr->type = type;
        attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
}

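/*
 * Create one hardware/hw-cache event for the given hybrid PMU. The attr
 * is temporarily re-encoded for this PMU and restored afterwards so the
 * caller can reuse it for the other hybrid PMUs.
 */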
static int create_event_hybrid(__u32 config_type, int *idx,
                               struct list_head *list,
                               struct perf_event_attr *attr, char *name,
                               struct list_head *config_terms,
                               struct perf_pmu *pmu)
{
        struct evsel *evsel;
        __u32 type = attr->type;
        __u64 config = attr->config;

        config_hybrid_attr(attr, config_type, pmu->type);
        evsel = parse_events__add_event_hybrid(list, idx, attr, name,
                                               pmu, config_terms);
        if (evsel) {
                /* strdup() can fail; treat it like any other allocation failure. */
                evsel->pmu_name = strdup(pmu->name);
                if (!evsel->pmu_name)
                        return -ENOMEM;
        } else {
                return -ENOMEM;
        }

        attr->type = type;
        attr->config = config;
        return 0;
}

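/*
 * Returns 0 when no specific hybrid PMU was requested or when the
 * requested name matches this PMU, non-zero otherwise.
 */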
static int pmu_cmp(struct parse_events_state *parse_state,
                   struct perf_pmu *pmu)
{
        if (!parse_state->hybrid_pmu_name)
                return 0;

        return strcmp(parse_state->hybrid_pmu_name, pmu->name);
}

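/*
 * Expand a hardware event into one event per matching hybrid PMU, with a
 * private copy of the config terms for each of them.
 */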
static int add_hw_hybrid(struct parse_events_state *parse_state,
                         struct list_head *list, struct perf_event_attr *attr,
                         char *name, struct list_head *config_terms)
{
        struct perf_pmu *pmu;
        int ret;

        perf_pmu__for_each_hybrid_pmu(pmu) {
                LIST_HEAD(terms);

                if (pmu_cmp(parse_state, pmu))
                        continue;

                copy_config_terms(&terms, config_terms);
                ret = create_event_hybrid(PERF_TYPE_HARDWARE,
                                          &parse_state->idx, list, attr, name,
                                          &terms, pmu);
                free_config_terms(&terms);
                if (ret)
                        return ret;
        }

        return 0;
}

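/*
 * Create one raw event for the given hybrid PMU. Raw events keep their
 * config untouched; only attr->type is pointed at the PMU.
 */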
static int create_raw_event_hybrid(int *idx, struct list_head *list,
                                   struct perf_event_attr *attr, char *name,
                                   struct list_head *config_terms,
                                   struct perf_pmu *pmu)
{
        struct evsel *evsel;

        attr->type = pmu->type;
        evsel = parse_events__add_event_hybrid(list, idx, attr, name,
                                               pmu, config_terms);
        if (evsel) {
                /* strdup() can fail; treat it like any other allocation failure. */
                evsel->pmu_name = strdup(pmu->name);
                if (!evsel->pmu_name)
                        return -ENOMEM;
        } else {
                return -ENOMEM;
        }

        return 0;
}

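/*
 * Expand a raw event into one event per matching hybrid PMU.
 */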
static int add_raw_hybrid(struct parse_events_state *parse_state,
                          struct list_head *list, struct perf_event_attr *attr,
                          char *name, struct list_head *config_terms)
{
        struct perf_pmu *pmu;
        int ret;

        perf_pmu__for_each_hybrid_pmu(pmu) {
                LIST_HEAD(terms);

                if (pmu_cmp(parse_state, pmu))
                        continue;

                copy_config_terms(&terms, config_terms);
                ret = create_raw_event_hybrid(&parse_state->idx, list, attr,
                                              name, &terms, pmu);
                free_config_terms(&terms);
                if (ret)
                        return ret;
        }

        return 0;
}

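/*
 * Entry point for numeric (hardware/raw) events. *hybrid tells the
 * caller whether the event was handled here; software events and
 * non-hybrid systems fall through to the regular parsing path.
 */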
int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
                                     struct list_head *list,
                                     struct perf_event_attr *attr,
                                     char *name, struct list_head *config_terms,
                                     bool *hybrid)
{
        *hybrid = false;
        if (attr->type == PERF_TYPE_SOFTWARE)
                return 0;

        if (!perf_pmu__has_hybrid())
                return 0;

        *hybrid = true;
        if (attr->type != PERF_TYPE_RAW) {
                return add_hw_hybrid(parse_state, list, attr, name,
                                     config_terms);
        }

        return add_raw_hybrid(parse_state, list, attr, name,
                              config_terms);
}

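/*
 * Entry point for hardware cache events: create one event per matching
 * hybrid PMU.
 */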
int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
                                   struct perf_event_attr *attr, char *name,
                                   struct list_head *config_terms,
                                   bool *hybrid,
                                   struct parse_events_state *parse_state)
{
        struct perf_pmu *pmu;
        int ret;

        *hybrid = false;
        if (!perf_pmu__has_hybrid())
                return 0;

        *hybrid = true;
        perf_pmu__for_each_hybrid_pmu(pmu) {
                LIST_HEAD(terms);

                if (pmu_cmp(parse_state, pmu))
                        continue;

                copy_config_terms(&terms, config_terms);
                ret = create_event_hybrid(PERF_TYPE_HW_CACHE, idx, list,
                                          attr, name, &terms, pmu);
                free_config_terms(&terms);
                if (ret)
                        return ret;
        }

        return 0;
}