1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */
6
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12
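/*
 * Helper for the addr_* attributes below: program the include/exclude
 * bit in the ViewInst include/exclude control register (TRCVIIECTLR)
 * for the address range comparator pair selected by config->addr_idx.
 * Only instruction address comparators configured as a range pair are
 * affected.
 */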
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
14 {
15 u8 idx;
16 struct etmv4_config *config = &drvdata->config;
17
18 idx = config->addr_idx;
19
20 /*
21 * TRCACATRn.TYPE bit[1:0]: type of comparison
22 * the trace unit performs
23 */
24 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
25 if (idx % 2 != 0)
26 return -EINVAL;
27
28 /*
29 * We are performing instruction address comparison. Set the
30 * relevant bit of ViewInst Include/Exclude Control register
31 * for corresponding address comparator pair.
32 */
33 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
35 return -EINVAL;
36
37 if (exclude == true) {
38 /*
39 * Set exclude bit and unset the include bit
40 * corresponding to comparator pair
41 */
42 config->viiectlr |= BIT(idx / 2 + 16);
43 config->viiectlr &= ~BIT(idx / 2);
44 } else {
45 /*
46 * Set include bit and unset exclude bit
47 * corresponding to comparator pair
48 */
49 config->viiectlr |= BIT(idx / 2);
50 config->viiectlr &= ~BIT(idx / 2 + 16);
51 }
52 }
53 return 0;
54 }
55
static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
58 char *buf)
59 {
60 unsigned long val;
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
62
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
65 }
66 static DEVICE_ATTR_RO(nr_pe_cmp);
67
static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
70 char *buf)
71 {
72 unsigned long val;
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
74
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
77 }
78 static DEVICE_ATTR_RO(nr_addr_cmp);
79
static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
82 char *buf)
83 {
84 unsigned long val;
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
86
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
89 }
90 static DEVICE_ATTR_RO(nr_cntr);
91
static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
94 char *buf)
95 {
96 unsigned long val;
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
98
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
101 }
102 static DEVICE_ATTR_RO(nr_ext_inp);
103
static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
106 char *buf)
107 {
108 unsigned long val;
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
110
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
113 }
114 static DEVICE_ATTR_RO(numcidc);
115
static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
118 char *buf)
119 {
120 unsigned long val;
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
122
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
125 }
126 static DEVICE_ATTR_RO(numvmidc);
127
static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
130 char *buf)
131 {
132 unsigned long val;
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
134
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
137 }
138 static DEVICE_ATTR_RO(nrseqstate);
139
static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
142 char *buf)
143 {
144 unsigned long val;
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
146
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
149 }
150 static DEVICE_ATTR_RO(nr_resource);
151
static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
154 char *buf)
155 {
156 unsigned long val;
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
158
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
161 }
162 static DEVICE_ATTR_RO(nr_ss_cmp);
163
static ssize_t reset_store(struct device *dev,
165 struct device_attribute *attr,
166 const char *buf, size_t size)
167 {
168 int i;
169 unsigned long val;
170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 struct etmv4_config *config = &drvdata->config;
172
173 if (kstrtoul(buf, 16, &val))
174 return -EINVAL;
175
176 spin_lock(&drvdata->spinlock);
177 if (val)
178 config->mode = 0x0;
179
180 /* Disable data tracing: do not trace load and store data transfers */
181 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 config->cfg &= ~(BIT(1) | BIT(2));
183
184 /* Disable data value and data address tracing */
185 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 ETM_MODE_DATA_TRACE_VAL);
187 config->cfg &= ~(BIT(16) | BIT(17));
188
189 /* Disable all events tracing */
190 config->eventctrl0 = 0x0;
191 config->eventctrl1 = 0x0;
192
193 /* Disable timestamp event */
194 config->ts_ctrl = 0x0;
195
196 /* Disable stalling */
197 config->stall_ctrl = 0x0;
198
/* Reset trace synchronization period to 2^8 = 256 bytes */
200 if (drvdata->syncpr == false)
201 config->syncfreq = 0x8;
202
203 /*
204 * Enable ViewInst to trace everything with start-stop logic in
205 * started state. ARM recommends start-stop logic is set before
206 * each trace run.
207 */
208 config->vinst_ctrl = BIT(0);
209 if (drvdata->nr_addr_cmp > 0) {
210 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 /* SSSTATUS, bit[9] */
212 config->vinst_ctrl |= BIT(9);
213 }
214
215 /* No address range filtering for ViewInst */
216 config->viiectlr = 0x0;
217
218 /* No start-stop filtering for ViewInst */
219 config->vissctlr = 0x0;
220 config->vipcssctlr = 0x0;
221
222 /* Disable seq events */
223 for (i = 0; i < drvdata->nrseqstate-1; i++)
224 config->seq_ctrl[i] = 0x0;
225 config->seq_rst = 0x0;
226 config->seq_state = 0x0;
227
228 /* Disable external input events */
229 config->ext_inp = 0x0;
230
231 config->cntr_idx = 0x0;
232 for (i = 0; i < drvdata->nr_cntr; i++) {
233 config->cntrldvr[i] = 0x0;
234 config->cntr_ctrl[i] = 0x0;
235 config->cntr_val[i] = 0x0;
236 }
237
238 config->res_idx = 0x0;
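/* resource selector pair 0 (selectors 0 and 1) is reserved, so start at 2 */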
239 for (i = 2; i < 2 * drvdata->nr_resource; i++)
240 config->res_ctrl[i] = 0x0;
241
242 config->ss_idx = 0x0;
243 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
244 config->ss_ctrl[i] = 0x0;
245 config->ss_pe_cmp[i] = 0x0;
246 }
247
248 config->addr_idx = 0x0;
249 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
250 config->addr_val[i] = 0x0;
251 config->addr_acc[i] = 0x0;
252 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
253 }
254
255 config->ctxid_idx = 0x0;
256 for (i = 0; i < drvdata->numcidc; i++)
257 config->ctxid_pid[i] = 0x0;
258
259 config->ctxid_mask0 = 0x0;
260 config->ctxid_mask1 = 0x0;
261
262 config->vmid_idx = 0x0;
263 for (i = 0; i < drvdata->numvmidc; i++)
264 config->vmid_val[i] = 0x0;
265 config->vmid_mask0 = 0x0;
266 config->vmid_mask1 = 0x0;
267
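/* restore the default trace ID for this CPU (CPU number + 1, so the ID is never zero) */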
268 drvdata->trcid = drvdata->cpu + 1;
269
270 spin_unlock(&drvdata->spinlock);
271
272 return size;
273 }
274 static DEVICE_ATTR_WO(reset);
275
static ssize_t mode_show(struct device *dev,
277 struct device_attribute *attr,
278 char *buf)
279 {
280 unsigned long val;
281 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
282 struct etmv4_config *config = &drvdata->config;
283
284 val = config->mode;
285 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
286 }
287
static ssize_t mode_store(struct device *dev,
289 struct device_attribute *attr,
290 const char *buf, size_t size)
291 {
292 unsigned long val, mode;
293 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
294 struct etmv4_config *config = &drvdata->config;
295
296 if (kstrtoul(buf, 16, &val))
297 return -EINVAL;
298
299 spin_lock(&drvdata->spinlock);
300 config->mode = val & ETMv4_MODE_ALL;
301
302 if (drvdata->instrp0 == true) {
303 /* start by clearing instruction P0 field */
304 config->cfg &= ~(BIT(1) | BIT(2));
305 if (config->mode & ETM_MODE_LOAD)
306 /* 0b01 Trace load instructions as P0 instructions */
307 config->cfg |= BIT(1);
308 if (config->mode & ETM_MODE_STORE)
309 /* 0b10 Trace store instructions as P0 instructions */
310 config->cfg |= BIT(2);
311 if (config->mode & ETM_MODE_LOAD_STORE)
312 /*
313 * 0b11 Trace load and store instructions
314 * as P0 instructions
315 */
316 config->cfg |= BIT(1) | BIT(2);
317 }
318
319 /* bit[3], Branch broadcast mode */
320 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
321 config->cfg |= BIT(3);
322 else
323 config->cfg &= ~BIT(3);
324
325 /* bit[4], Cycle counting instruction trace bit */
326 if ((config->mode & ETMv4_MODE_CYCACC) &&
327 (drvdata->trccci == true))
328 config->cfg |= BIT(4);
329 else
330 config->cfg &= ~BIT(4);
331
332 /* bit[6], Context ID tracing bit */
333 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
334 config->cfg |= BIT(6);
335 else
336 config->cfg &= ~BIT(6);
337
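/* bit[7], Virtual context identifier (VMID) tracing bit */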
338 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
339 config->cfg |= BIT(7);
340 else
341 config->cfg &= ~BIT(7);
342
343 /* bits[10:8], Conditional instruction tracing bit */
344 mode = ETM_MODE_COND(config->mode);
345 if (drvdata->trccond == true) {
346 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
347 config->cfg |= mode << 8;
348 }
349
350 /* bit[11], Global timestamp tracing bit */
351 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
352 config->cfg |= BIT(11);
353 else
354 config->cfg &= ~BIT(11);
355
356 /* bit[12], Return stack enable bit */
357 if ((config->mode & ETM_MODE_RETURNSTACK) &&
358 (drvdata->retstack == true))
359 config->cfg |= BIT(12);
360 else
361 config->cfg &= ~BIT(12);
362
363 /* bits[14:13], Q element enable field */
364 mode = ETM_MODE_QELEM(config->mode);
365 /* start by clearing QE bits */
366 config->cfg &= ~(BIT(13) | BIT(14));
367 /*
368 * if supported, Q elements with instruction counts are enabled.
369 * Always set the low bit for any requested mode. Valid combos are
370 * 0b00, 0b01 and 0b11.
371 */
372 if (mode && drvdata->q_support)
373 config->cfg |= BIT(13);
374 /*
375 * if supported, Q elements with and without instruction
376 * counts are enabled
377 */
378 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
379 config->cfg |= BIT(14);
380
381 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
382 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
383 (drvdata->atbtrig == true))
384 config->eventctrl1 |= BIT(11);
385 else
386 config->eventctrl1 &= ~BIT(11);
387
388 /* bit[12], Low-power state behavior override bit */
389 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
390 (drvdata->lpoverride == true))
391 config->eventctrl1 |= BIT(12);
392 else
393 config->eventctrl1 &= ~BIT(12);
394
395 /* bit[8], Instruction stall bit */
396 if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
397 config->stall_ctrl |= BIT(8);
398 else
399 config->stall_ctrl &= ~BIT(8);
400
401 /* bit[10], Prioritize instruction trace bit */
402 if (config->mode & ETM_MODE_INSTPRIO)
403 config->stall_ctrl |= BIT(10);
404 else
405 config->stall_ctrl &= ~BIT(10);
406
407 /* bit[13], Trace overflow prevention bit */
408 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
409 (drvdata->nooverflow == true))
410 config->stall_ctrl |= BIT(13);
411 else
412 config->stall_ctrl &= ~BIT(13);
413
414 /* bit[9] Start/stop logic control bit */
415 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
416 config->vinst_ctrl |= BIT(9);
417 else
418 config->vinst_ctrl &= ~BIT(9);
419
420 /* bit[10], Whether a trace unit must trace a Reset exception */
421 if (config->mode & ETM_MODE_TRACE_RESET)
422 config->vinst_ctrl |= BIT(10);
423 else
424 config->vinst_ctrl &= ~BIT(10);
425
426 /* bit[11], Whether a trace unit must trace a system error exception */
427 if ((config->mode & ETM_MODE_TRACE_ERR) &&
428 (drvdata->trc_error == true))
429 config->vinst_ctrl |= BIT(11);
430 else
431 config->vinst_ctrl &= ~BIT(11);
432
433 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
434 etm4_config_trace_mode(config);
435
436 spin_unlock(&drvdata->spinlock);
437
438 return size;
439 }
440 static DEVICE_ATTR_RW(mode);
441
static ssize_t pe_show(struct device *dev,
443 struct device_attribute *attr,
444 char *buf)
445 {
446 unsigned long val;
447 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
448 struct etmv4_config *config = &drvdata->config;
449
450 val = config->pe_sel;
451 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
452 }
453
static ssize_t pe_store(struct device *dev,
455 struct device_attribute *attr,
456 const char *buf, size_t size)
457 {
458 unsigned long val;
459 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
460 struct etmv4_config *config = &drvdata->config;
461
462 if (kstrtoul(buf, 16, &val))
463 return -EINVAL;
464
465 spin_lock(&drvdata->spinlock);
466 if (val > drvdata->nr_pe) {
467 spin_unlock(&drvdata->spinlock);
468 return -EINVAL;
469 }
470
471 config->pe_sel = val;
472 spin_unlock(&drvdata->spinlock);
473 return size;
474 }
475 static DEVICE_ATTR_RW(pe);
476
static ssize_t event_show(struct device *dev,
478 struct device_attribute *attr,
479 char *buf)
480 {
481 unsigned long val;
482 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
483 struct etmv4_config *config = &drvdata->config;
484
485 val = config->eventctrl0;
486 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
487 }
488
static ssize_t event_store(struct device *dev,
490 struct device_attribute *attr,
491 const char *buf, size_t size)
492 {
493 unsigned long val;
494 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
495 struct etmv4_config *config = &drvdata->config;
496
497 if (kstrtoul(buf, 16, &val))
498 return -EINVAL;
499
500 spin_lock(&drvdata->spinlock);
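/* mask the write down to the event select fields that are implemented */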
501 switch (drvdata->nr_event) {
502 case 0x0:
503 /* EVENT0, bits[7:0] */
504 config->eventctrl0 = val & 0xFF;
505 break;
506 case 0x1:
507 /* EVENT1, bits[15:8] */
508 config->eventctrl0 = val & 0xFFFF;
509 break;
510 case 0x2:
511 /* EVENT2, bits[23:16] */
512 config->eventctrl0 = val & 0xFFFFFF;
513 break;
514 case 0x3:
515 /* EVENT3, bits[31:24] */
516 config->eventctrl0 = val;
517 break;
518 default:
519 break;
520 }
521 spin_unlock(&drvdata->spinlock);
522 return size;
523 }
524 static DEVICE_ATTR_RW(event);
525
static ssize_t event_instren_show(struct device *dev,
527 struct device_attribute *attr,
528 char *buf)
529 {
530 unsigned long val;
531 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
532 struct etmv4_config *config = &drvdata->config;
533
534 val = BMVAL(config->eventctrl1, 0, 3);
535 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
536 }
537
static ssize_t event_instren_store(struct device *dev,
539 struct device_attribute *attr,
540 const char *buf, size_t size)
541 {
542 unsigned long val;
543 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
544 struct etmv4_config *config = &drvdata->config;
545
546 if (kstrtoul(buf, 16, &val))
547 return -EINVAL;
548
549 spin_lock(&drvdata->spinlock);
550 /* start by clearing all instruction event enable bits */
551 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
552 switch (drvdata->nr_event) {
553 case 0x0:
554 /* generate Event element for event 1 */
555 config->eventctrl1 |= val & BIT(1);
556 break;
557 case 0x1:
558 /* generate Event element for event 1 and 2 */
559 config->eventctrl1 |= val & (BIT(0) | BIT(1));
560 break;
561 case 0x2:
562 /* generate Event element for event 1, 2 and 3 */
563 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
564 break;
565 case 0x3:
566 /* generate Event element for all 4 events */
567 config->eventctrl1 |= val & 0xF;
568 break;
569 default:
570 break;
571 }
572 spin_unlock(&drvdata->spinlock);
573 return size;
574 }
575 static DEVICE_ATTR_RW(event_instren);
576
static ssize_t event_ts_show(struct device *dev,
578 struct device_attribute *attr,
579 char *buf)
580 {
581 unsigned long val;
582 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
583 struct etmv4_config *config = &drvdata->config;
584
585 val = config->ts_ctrl;
586 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
587 }
588
static ssize_t event_ts_store(struct device *dev,
590 struct device_attribute *attr,
591 const char *buf, size_t size)
592 {
593 unsigned long val;
594 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
595 struct etmv4_config *config = &drvdata->config;
596
597 if (kstrtoul(buf, 16, &val))
598 return -EINVAL;
599 if (!drvdata->ts_size)
600 return -EINVAL;
601
602 config->ts_ctrl = val & ETMv4_EVENT_MASK;
603 return size;
604 }
605 static DEVICE_ATTR_RW(event_ts);
606
static ssize_t syncfreq_show(struct device *dev,
608 struct device_attribute *attr,
609 char *buf)
610 {
611 unsigned long val;
612 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
613 struct etmv4_config *config = &drvdata->config;
614
615 val = config->syncfreq;
616 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
617 }
618
static ssize_t syncfreq_store(struct device *dev,
620 struct device_attribute *attr,
621 const char *buf, size_t size)
622 {
623 unsigned long val;
624 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
625 struct etmv4_config *config = &drvdata->config;
626
627 if (kstrtoul(buf, 16, &val))
628 return -EINVAL;
629 if (drvdata->syncpr == true)
630 return -EINVAL;
631
632 config->syncfreq = val & ETMv4_SYNC_MASK;
633 return size;
634 }
635 static DEVICE_ATTR_RW(syncfreq);
636
static ssize_t cyc_threshold_show(struct device *dev,
638 struct device_attribute *attr,
639 char *buf)
640 {
641 unsigned long val;
642 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
643 struct etmv4_config *config = &drvdata->config;
644
645 val = config->ccctlr;
646 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
647 }
648
static ssize_t cyc_threshold_store(struct device *dev,
650 struct device_attribute *attr,
651 const char *buf, size_t size)
652 {
653 unsigned long val;
654 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
655 struct etmv4_config *config = &drvdata->config;
656
657 if (kstrtoul(buf, 16, &val))
658 return -EINVAL;
659
660 /* mask off max threshold before checking min value */
661 val &= ETM_CYC_THRESHOLD_MASK;
662 if (val < drvdata->ccitmin)
663 return -EINVAL;
664
665 config->ccctlr = val;
666 return size;
667 }
668 static DEVICE_ATTR_RW(cyc_threshold);
669
static ssize_t bb_ctrl_show(struct device *dev,
671 struct device_attribute *attr,
672 char *buf)
673 {
674 unsigned long val;
675 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
676 struct etmv4_config *config = &drvdata->config;
677
678 val = config->bb_ctrl;
679 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
680 }
681
static ssize_t bb_ctrl_store(struct device *dev,
683 struct device_attribute *attr,
684 const char *buf, size_t size)
685 {
686 unsigned long val;
687 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
688 struct etmv4_config *config = &drvdata->config;
689
690 if (kstrtoul(buf, 16, &val))
691 return -EINVAL;
692 if (drvdata->trcbb == false)
693 return -EINVAL;
694 if (!drvdata->nr_addr_cmp)
695 return -EINVAL;
696
697 /*
698 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
699 * individual range comparators. If include then at least 1
700 * range must be selected.
701 */
702 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
703 return -EINVAL;
704
705 config->bb_ctrl = val & GENMASK(8, 0);
706 return size;
707 }
708 static DEVICE_ATTR_RW(bb_ctrl);
709
static ssize_t event_vinst_show(struct device *dev,
711 struct device_attribute *attr,
712 char *buf)
713 {
714 unsigned long val;
715 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
716 struct etmv4_config *config = &drvdata->config;
717
718 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
719 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
720 }
721
static ssize_t event_vinst_store(struct device *dev,
723 struct device_attribute *attr,
724 const char *buf, size_t size)
725 {
726 unsigned long val;
727 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
728 struct etmv4_config *config = &drvdata->config;
729
730 if (kstrtoul(buf, 16, &val))
731 return -EINVAL;
732
733 spin_lock(&drvdata->spinlock);
734 val &= ETMv4_EVENT_MASK;
735 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
736 config->vinst_ctrl |= val;
737 spin_unlock(&drvdata->spinlock);
738 return size;
739 }
740 static DEVICE_ATTR_RW(event_vinst);
741
static ssize_t s_exlevel_vinst_show(struct device *dev,
743 struct device_attribute *attr,
744 char *buf)
745 {
746 unsigned long val;
747 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
748 struct etmv4_config *config = &drvdata->config;
749
750 val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
751 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
752 }
753
static ssize_t s_exlevel_vinst_store(struct device *dev,
755 struct device_attribute *attr,
756 const char *buf, size_t size)
757 {
758 unsigned long val;
759 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
760 struct etmv4_config *config = &drvdata->config;
761
762 if (kstrtoul(buf, 16, &val))
763 return -EINVAL;
764
765 spin_lock(&drvdata->spinlock);
766 /* clear all EXLEVEL_S bits */
767 config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
768 /* enable instruction tracing for corresponding exception level */
769 val &= drvdata->s_ex_level;
770 config->vinst_ctrl |= (val << 16);
771 spin_unlock(&drvdata->spinlock);
772 return size;
773 }
774 static DEVICE_ATTR_RW(s_exlevel_vinst);
775
static ssize_t ns_exlevel_vinst_show(struct device *dev,
777 struct device_attribute *attr,
778 char *buf)
779 {
780 unsigned long val;
781 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
782 struct etmv4_config *config = &drvdata->config;
783
784 /* EXLEVEL_NS, bits[23:20] */
785 val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
786 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
787 }
788
static ssize_t ns_exlevel_vinst_store(struct device *dev,
790 struct device_attribute *attr,
791 const char *buf, size_t size)
792 {
793 unsigned long val;
794 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
795 struct etmv4_config *config = &drvdata->config;
796
797 if (kstrtoul(buf, 16, &val))
798 return -EINVAL;
799
800 spin_lock(&drvdata->spinlock);
801 /* clear EXLEVEL_NS bits */
802 config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
803 /* enable instruction tracing for corresponding exception level */
804 val &= drvdata->ns_ex_level;
805 config->vinst_ctrl |= (val << 20);
806 spin_unlock(&drvdata->spinlock);
807 return size;
808 }
809 static DEVICE_ATTR_RW(ns_exlevel_vinst);
810
static ssize_t addr_idx_show(struct device *dev,
812 struct device_attribute *attr,
813 char *buf)
814 {
815 unsigned long val;
816 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
817 struct etmv4_config *config = &drvdata->config;
818
819 val = config->addr_idx;
820 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
821 }
822
static ssize_t addr_idx_store(struct device *dev,
824 struct device_attribute *attr,
825 const char *buf, size_t size)
826 {
827 unsigned long val;
828 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
829 struct etmv4_config *config = &drvdata->config;
830
831 if (kstrtoul(buf, 16, &val))
832 return -EINVAL;
833 if (val >= drvdata->nr_addr_cmp * 2)
834 return -EINVAL;
835
836 /*
837 * Use spinlock to ensure index doesn't change while it gets
838 * dereferenced multiple times within a spinlock block elsewhere.
839 */
840 spin_lock(&drvdata->spinlock);
841 config->addr_idx = val;
842 spin_unlock(&drvdata->spinlock);
843 return size;
844 }
845 static DEVICE_ATTR_RW(addr_idx);
846
static ssize_t addr_instdatatype_show(struct device *dev,
848 struct device_attribute *attr,
849 char *buf)
850 {
851 ssize_t len;
852 u8 val, idx;
853 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
854 struct etmv4_config *config = &drvdata->config;
855
856 spin_lock(&drvdata->spinlock);
857 idx = config->addr_idx;
858 val = BMVAL(config->addr_acc[idx], 0, 1);
859 len = scnprintf(buf, PAGE_SIZE, "%s\n",
860 val == ETM_INSTR_ADDR ? "instr" :
861 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
862 (val == ETM_DATA_STORE_ADDR ? "data_store" :
863 "data_load_store")));
864 spin_unlock(&drvdata->spinlock);
865 return len;
866 }
867
static ssize_t addr_instdatatype_store(struct device *dev,
869 struct device_attribute *attr,
870 const char *buf, size_t size)
871 {
872 u8 idx;
873 char str[20] = "";
874 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
875 struct etmv4_config *config = &drvdata->config;
876
877 if (strlen(buf) >= 20)
878 return -EINVAL;
879 if (sscanf(buf, "%s", str) != 1)
880 return -EINVAL;
881
882 spin_lock(&drvdata->spinlock);
883 idx = config->addr_idx;
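/* only instruction address comparison is handled here; other type strings are ignored */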
884 if (!strcmp(str, "instr"))
885 /* TYPE, bits[1:0] */
886 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
887
888 spin_unlock(&drvdata->spinlock);
889 return size;
890 }
891 static DEVICE_ATTR_RW(addr_instdatatype);
892
static ssize_t addr_single_show(struct device *dev,
894 struct device_attribute *attr,
895 char *buf)
896 {
897 u8 idx;
898 unsigned long val;
899 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
900 struct etmv4_config *config = &drvdata->config;
901
902 idx = config->addr_idx;
903 spin_lock(&drvdata->spinlock);
904 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
905 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
906 spin_unlock(&drvdata->spinlock);
907 return -EPERM;
908 }
909 val = (unsigned long)config->addr_val[idx];
910 spin_unlock(&drvdata->spinlock);
911 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
912 }
913
static ssize_t addr_single_store(struct device *dev,
915 struct device_attribute *attr,
916 const char *buf, size_t size)
917 {
918 u8 idx;
919 unsigned long val;
920 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
921 struct etmv4_config *config = &drvdata->config;
922
923 if (kstrtoul(buf, 16, &val))
924 return -EINVAL;
925
926 spin_lock(&drvdata->spinlock);
927 idx = config->addr_idx;
928 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
929 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
930 spin_unlock(&drvdata->spinlock);
931 return -EPERM;
932 }
933
934 config->addr_val[idx] = (u64)val;
935 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
936 spin_unlock(&drvdata->spinlock);
937 return size;
938 }
939 static DEVICE_ATTR_RW(addr_single);
940
static ssize_t addr_range_show(struct device *dev,
942 struct device_attribute *attr,
943 char *buf)
944 {
945 u8 idx;
946 unsigned long val1, val2;
947 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
948 struct etmv4_config *config = &drvdata->config;
949
950 spin_lock(&drvdata->spinlock);
951 idx = config->addr_idx;
952 if (idx % 2 != 0) {
953 spin_unlock(&drvdata->spinlock);
954 return -EPERM;
955 }
956 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
957 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
958 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
959 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
960 spin_unlock(&drvdata->spinlock);
961 return -EPERM;
962 }
963
964 val1 = (unsigned long)config->addr_val[idx];
965 val2 = (unsigned long)config->addr_val[idx + 1];
966 spin_unlock(&drvdata->spinlock);
967 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
968 }
969
static ssize_t addr_range_store(struct device *dev,
971 struct device_attribute *attr,
972 const char *buf, size_t size)
973 {
974 u8 idx;
975 unsigned long val1, val2;
976 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
977 struct etmv4_config *config = &drvdata->config;
978 int elements, exclude;
979
980 elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
981
/* exclude is optional, but we need at least two parameters */
983 if (elements < 2)
984 return -EINVAL;
985 /* lower address comparator cannot have a higher address value */
986 if (val1 > val2)
987 return -EINVAL;
988
989 spin_lock(&drvdata->spinlock);
990 idx = config->addr_idx;
991 if (idx % 2 != 0) {
992 spin_unlock(&drvdata->spinlock);
993 return -EPERM;
994 }
995
996 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
997 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
998 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
999 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1000 spin_unlock(&drvdata->spinlock);
1001 return -EPERM;
1002 }
1003
1004 config->addr_val[idx] = (u64)val1;
1005 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1006 config->addr_val[idx + 1] = (u64)val2;
1007 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
/*
 * Program include or exclude control bits for vinst or vdata
 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE.
 * Use the supplied value, or default to the bit set in 'mode'.
 */
1013 if (elements != 3)
1014 exclude = config->mode & ETM_MODE_EXCLUDE;
1015 etm4_set_mode_exclude(drvdata, exclude ? true : false);
1016
1017 spin_unlock(&drvdata->spinlock);
1018 return size;
1019 }
1020 static DEVICE_ATTR_RW(addr_range);
1021
static ssize_t addr_start_show(struct device *dev,
1023 struct device_attribute *attr,
1024 char *buf)
1025 {
1026 u8 idx;
1027 unsigned long val;
1028 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1029 struct etmv4_config *config = &drvdata->config;
1030
1031 spin_lock(&drvdata->spinlock);
1032 idx = config->addr_idx;
1033
1034 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1035 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1036 spin_unlock(&drvdata->spinlock);
1037 return -EPERM;
1038 }
1039
1040 val = (unsigned long)config->addr_val[idx];
1041 spin_unlock(&drvdata->spinlock);
1042 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1043 }
1044
static ssize_t addr_start_store(struct device *dev,
1046 struct device_attribute *attr,
1047 const char *buf, size_t size)
1048 {
1049 u8 idx;
1050 unsigned long val;
1051 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1052 struct etmv4_config *config = &drvdata->config;
1053
1054 if (kstrtoul(buf, 16, &val))
1055 return -EINVAL;
1056
1057 spin_lock(&drvdata->spinlock);
1058 idx = config->addr_idx;
1059 if (!drvdata->nr_addr_cmp) {
1060 spin_unlock(&drvdata->spinlock);
1061 return -EINVAL;
1062 }
1063 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1064 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1065 spin_unlock(&drvdata->spinlock);
1066 return -EPERM;
1067 }
1068
1069 config->addr_val[idx] = (u64)val;
1070 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1071 config->vissctlr |= BIT(idx);
1072 spin_unlock(&drvdata->spinlock);
1073 return size;
1074 }
1075 static DEVICE_ATTR_RW(addr_start);
1076
static ssize_t addr_stop_show(struct device *dev,
1078 struct device_attribute *attr,
1079 char *buf)
1080 {
1081 u8 idx;
1082 unsigned long val;
1083 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1084 struct etmv4_config *config = &drvdata->config;
1085
1086 spin_lock(&drvdata->spinlock);
1087 idx = config->addr_idx;
1088
1089 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1090 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1091 spin_unlock(&drvdata->spinlock);
1092 return -EPERM;
1093 }
1094
1095 val = (unsigned long)config->addr_val[idx];
1096 spin_unlock(&drvdata->spinlock);
1097 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1098 }
1099
static ssize_t addr_stop_store(struct device *dev,
1101 struct device_attribute *attr,
1102 const char *buf, size_t size)
1103 {
1104 u8 idx;
1105 unsigned long val;
1106 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1107 struct etmv4_config *config = &drvdata->config;
1108
1109 if (kstrtoul(buf, 16, &val))
1110 return -EINVAL;
1111
1112 spin_lock(&drvdata->spinlock);
1113 idx = config->addr_idx;
1114 if (!drvdata->nr_addr_cmp) {
1115 spin_unlock(&drvdata->spinlock);
1116 return -EINVAL;
1117 }
1118 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1119 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1120 spin_unlock(&drvdata->spinlock);
1121 return -EPERM;
1122 }
1123
1124 config->addr_val[idx] = (u64)val;
1125 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1126 config->vissctlr |= BIT(idx + 16);
1127 spin_unlock(&drvdata->spinlock);
1128 return size;
1129 }
1130 static DEVICE_ATTR_RW(addr_stop);
1131
static ssize_t addr_ctxtype_show(struct device *dev,
1133 struct device_attribute *attr,
1134 char *buf)
1135 {
1136 ssize_t len;
1137 u8 idx, val;
1138 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1139 struct etmv4_config *config = &drvdata->config;
1140
1141 spin_lock(&drvdata->spinlock);
1142 idx = config->addr_idx;
1143 /* CONTEXTTYPE, bits[3:2] */
1144 val = BMVAL(config->addr_acc[idx], 2, 3);
1145 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1146 (val == ETM_CTX_CTXID ? "ctxid" :
1147 (val == ETM_CTX_VMID ? "vmid" : "all")));
1148 spin_unlock(&drvdata->spinlock);
1149 return len;
1150 }
1151
static ssize_t addr_ctxtype_store(struct device *dev,
1153 struct device_attribute *attr,
1154 const char *buf, size_t size)
1155 {
1156 u8 idx;
1157 char str[10] = "";
1158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1159 struct etmv4_config *config = &drvdata->config;
1160
1161 if (strlen(buf) >= 10)
1162 return -EINVAL;
1163 if (sscanf(buf, "%s", str) != 1)
1164 return -EINVAL;
1165
1166 spin_lock(&drvdata->spinlock);
1167 idx = config->addr_idx;
1168 if (!strcmp(str, "none"))
1169 /* start by clearing context type bits */
1170 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1171 else if (!strcmp(str, "ctxid")) {
/* 0b01 The trace unit performs a Context ID comparison */
1173 if (drvdata->numcidc) {
1174 config->addr_acc[idx] |= BIT(2);
1175 config->addr_acc[idx] &= ~BIT(3);
1176 }
1177 } else if (!strcmp(str, "vmid")) {
/* 0b10 The trace unit performs a VMID comparison */
1179 if (drvdata->numvmidc) {
1180 config->addr_acc[idx] &= ~BIT(2);
1181 config->addr_acc[idx] |= BIT(3);
1182 }
1183 } else if (!strcmp(str, "all")) {
/*
 * 0b11 The trace unit performs a Context ID
 * comparison and a VMID comparison
 */
1188 if (drvdata->numcidc)
1189 config->addr_acc[idx] |= BIT(2);
1190 if (drvdata->numvmidc)
1191 config->addr_acc[idx] |= BIT(3);
1192 }
1193 spin_unlock(&drvdata->spinlock);
1194 return size;
1195 }
1196 static DEVICE_ATTR_RW(addr_ctxtype);
1197
static ssize_t addr_context_show(struct device *dev,
1199 struct device_attribute *attr,
1200 char *buf)
1201 {
1202 u8 idx;
1203 unsigned long val;
1204 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1205 struct etmv4_config *config = &drvdata->config;
1206
1207 spin_lock(&drvdata->spinlock);
1208 idx = config->addr_idx;
1209 /* context ID comparator bits[6:4] */
1210 val = BMVAL(config->addr_acc[idx], 4, 6);
1211 spin_unlock(&drvdata->spinlock);
1212 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1213 }
1214
static ssize_t addr_context_store(struct device *dev,
1216 struct device_attribute *attr,
1217 const char *buf, size_t size)
1218 {
1219 u8 idx;
1220 unsigned long val;
1221 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1222 struct etmv4_config *config = &drvdata->config;
1223
1224 if (kstrtoul(buf, 16, &val))
1225 return -EINVAL;
1226 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1227 return -EINVAL;
1228 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1229 drvdata->numcidc : drvdata->numvmidc))
1230 return -EINVAL;
1231
1232 spin_lock(&drvdata->spinlock);
1233 idx = config->addr_idx;
1234 /* clear context ID comparator bits[6:4] */
1235 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1236 config->addr_acc[idx] |= (val << 4);
1237 spin_unlock(&drvdata->spinlock);
1238 return size;
1239 }
1240 static DEVICE_ATTR_RW(addr_context);
1241
static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1243 struct device_attribute *attr,
1244 char *buf)
1245 {
1246 u8 idx;
1247 unsigned long val;
1248 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1249 struct etmv4_config *config = &drvdata->config;
1250
1251 spin_lock(&drvdata->spinlock);
1252 idx = config->addr_idx;
1253 val = BMVAL(config->addr_acc[idx], 8, 14);
1254 spin_unlock(&drvdata->spinlock);
1255 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1256 }
1257
static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1259 struct device_attribute *attr,
1260 const char *buf, size_t size)
1261 {
1262 u8 idx;
1263 unsigned long val;
1264 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1265 struct etmv4_config *config = &drvdata->config;
1266
1267 if (kstrtoul(buf, 0, &val))
1268 return -EINVAL;
1269
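/* only the 7 EXLEVEL_S/EXLEVEL_NS bits may be set */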
1270 if (val & ~((GENMASK(14, 8) >> 8)))
1271 return -EINVAL;
1272
1273 spin_lock(&drvdata->spinlock);
1274 idx = config->addr_idx;
1275 /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1276 config->addr_acc[idx] &= ~(GENMASK(14, 8));
1277 config->addr_acc[idx] |= (val << 8);
1278 spin_unlock(&drvdata->spinlock);
1279 return size;
1280 }
1281 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1282
1283 static const char * const addr_type_names[] = {
1284 "unused",
1285 "single",
1286 "range",
1287 "start",
1288 "stop"
1289 };
1290
static ssize_t addr_cmp_view_show(struct device *dev,
1292 struct device_attribute *attr, char *buf)
1293 {
1294 u8 idx, addr_type;
1295 unsigned long addr_v, addr_v2, addr_ctrl;
1296 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1297 struct etmv4_config *config = &drvdata->config;
1298 int size = 0;
1299 bool exclude = false;
1300
1301 spin_lock(&drvdata->spinlock);
1302 idx = config->addr_idx;
1303 addr_v = config->addr_val[idx];
1304 addr_ctrl = config->addr_acc[idx];
1305 addr_type = config->addr_type[idx];
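/* for a range, report both comparators of the pair plus the include/exclude setting */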
1306 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1307 if (idx & 0x1) {
1308 idx -= 1;
1309 addr_v2 = addr_v;
1310 addr_v = config->addr_val[idx];
1311 } else {
1312 addr_v2 = config->addr_val[idx + 1];
1313 }
1314 exclude = config->viiectlr & BIT(idx / 2 + 16);
1315 }
1316 spin_unlock(&drvdata->spinlock);
1317 if (addr_type) {
1318 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1319 addr_type_names[addr_type], addr_v);
1320 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1321 size += scnprintf(buf + size, PAGE_SIZE - size,
1322 " %#lx %s", addr_v2,
1323 exclude ? "exclude" : "include");
1324 }
1325 size += scnprintf(buf + size, PAGE_SIZE - size,
1326 " ctrl(%#lx)\n", addr_ctrl);
1327 } else {
1328 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1329 }
1330 return size;
1331 }
1332 static DEVICE_ATTR_RO(addr_cmp_view);
1333
static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1335 struct device_attribute *attr,
1336 char *buf)
1337 {
1338 unsigned long val;
1339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1340 struct etmv4_config *config = &drvdata->config;
1341
1342 if (!drvdata->nr_pe_cmp)
1343 return -EINVAL;
1344 val = config->vipcssctlr;
1345 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1346 }
static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1348 struct device_attribute *attr,
1349 const char *buf, size_t size)
1350 {
1351 unsigned long val;
1352 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1353 struct etmv4_config *config = &drvdata->config;
1354
1355 if (kstrtoul(buf, 16, &val))
1356 return -EINVAL;
1357 if (!drvdata->nr_pe_cmp)
1358 return -EINVAL;
1359
1360 spin_lock(&drvdata->spinlock);
1361 config->vipcssctlr = val;
1362 spin_unlock(&drvdata->spinlock);
1363 return size;
1364 }
1365 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1366
static ssize_t seq_idx_show(struct device *dev,
1368 struct device_attribute *attr,
1369 char *buf)
1370 {
1371 unsigned long val;
1372 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1373 struct etmv4_config *config = &drvdata->config;
1374
1375 val = config->seq_idx;
1376 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1377 }
1378
static ssize_t seq_idx_store(struct device *dev,
1380 struct device_attribute *attr,
1381 const char *buf, size_t size)
1382 {
1383 unsigned long val;
1384 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1385 struct etmv4_config *config = &drvdata->config;
1386
1387 if (kstrtoul(buf, 16, &val))
1388 return -EINVAL;
1389 if (val >= drvdata->nrseqstate - 1)
1390 return -EINVAL;
1391
1392 /*
1393 * Use spinlock to ensure index doesn't change while it gets
1394 * dereferenced multiple times within a spinlock block elsewhere.
1395 */
1396 spin_lock(&drvdata->spinlock);
1397 config->seq_idx = val;
1398 spin_unlock(&drvdata->spinlock);
1399 return size;
1400 }
1401 static DEVICE_ATTR_RW(seq_idx);
1402
static ssize_t seq_state_show(struct device *dev,
1404 struct device_attribute *attr,
1405 char *buf)
1406 {
1407 unsigned long val;
1408 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1409 struct etmv4_config *config = &drvdata->config;
1410
1411 val = config->seq_state;
1412 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1413 }
1414
static ssize_t seq_state_store(struct device *dev,
1416 struct device_attribute *attr,
1417 const char *buf, size_t size)
1418 {
1419 unsigned long val;
1420 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1421 struct etmv4_config *config = &drvdata->config;
1422
1423 if (kstrtoul(buf, 16, &val))
1424 return -EINVAL;
1425 if (val >= drvdata->nrseqstate)
1426 return -EINVAL;
1427
1428 config->seq_state = val;
1429 return size;
1430 }
1431 static DEVICE_ATTR_RW(seq_state);
1432
static ssize_t seq_event_show(struct device *dev,
1434 struct device_attribute *attr,
1435 char *buf)
1436 {
1437 u8 idx;
1438 unsigned long val;
1439 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1440 struct etmv4_config *config = &drvdata->config;
1441
1442 spin_lock(&drvdata->spinlock);
1443 idx = config->seq_idx;
1444 val = config->seq_ctrl[idx];
1445 spin_unlock(&drvdata->spinlock);
1446 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1447 }
1448
static ssize_t seq_event_store(struct device *dev,
1450 struct device_attribute *attr,
1451 const char *buf, size_t size)
1452 {
1453 u8 idx;
1454 unsigned long val;
1455 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1456 struct etmv4_config *config = &drvdata->config;
1457
1458 if (kstrtoul(buf, 16, &val))
1459 return -EINVAL;
1460
1461 spin_lock(&drvdata->spinlock);
1462 idx = config->seq_idx;
1463 /* Seq control has two masks B[15:8] F[7:0] */
1464 config->seq_ctrl[idx] = val & 0xFFFF;
1465 spin_unlock(&drvdata->spinlock);
1466 return size;
1467 }
1468 static DEVICE_ATTR_RW(seq_event);
1469
static ssize_t seq_reset_event_show(struct device *dev,
1471 struct device_attribute *attr,
1472 char *buf)
1473 {
1474 unsigned long val;
1475 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1476 struct etmv4_config *config = &drvdata->config;
1477
1478 val = config->seq_rst;
1479 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1480 }
1481
static ssize_t seq_reset_event_store(struct device *dev,
1483 struct device_attribute *attr,
1484 const char *buf, size_t size)
1485 {
1486 unsigned long val;
1487 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1488 struct etmv4_config *config = &drvdata->config;
1489
1490 if (kstrtoul(buf, 16, &val))
1491 return -EINVAL;
1492 if (!(drvdata->nrseqstate))
1493 return -EINVAL;
1494
1495 config->seq_rst = val & ETMv4_EVENT_MASK;
1496 return size;
1497 }
1498 static DEVICE_ATTR_RW(seq_reset_event);
1499
static ssize_t cntr_idx_show(struct device *dev,
1501 struct device_attribute *attr,
1502 char *buf)
1503 {
1504 unsigned long val;
1505 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1506 struct etmv4_config *config = &drvdata->config;
1507
1508 val = config->cntr_idx;
1509 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1510 }
1511
static ssize_t cntr_idx_store(struct device *dev,
1513 struct device_attribute *attr,
1514 const char *buf, size_t size)
1515 {
1516 unsigned long val;
1517 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1518 struct etmv4_config *config = &drvdata->config;
1519
1520 if (kstrtoul(buf, 16, &val))
1521 return -EINVAL;
1522 if (val >= drvdata->nr_cntr)
1523 return -EINVAL;
1524
1525 /*
1526 * Use spinlock to ensure index doesn't change while it gets
1527 * dereferenced multiple times within a spinlock block elsewhere.
1528 */
1529 spin_lock(&drvdata->spinlock);
1530 config->cntr_idx = val;
1531 spin_unlock(&drvdata->spinlock);
1532 return size;
1533 }
1534 static DEVICE_ATTR_RW(cntr_idx);
1535
static ssize_t cntrldvr_show(struct device *dev,
1537 struct device_attribute *attr,
1538 char *buf)
1539 {
1540 u8 idx;
1541 unsigned long val;
1542 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1543 struct etmv4_config *config = &drvdata->config;
1544
1545 spin_lock(&drvdata->spinlock);
1546 idx = config->cntr_idx;
1547 val = config->cntrldvr[idx];
1548 spin_unlock(&drvdata->spinlock);
1549 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1550 }
1551
static ssize_t cntrldvr_store(struct device *dev,
1553 struct device_attribute *attr,
1554 const char *buf, size_t size)
1555 {
1556 u8 idx;
1557 unsigned long val;
1558 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1559 struct etmv4_config *config = &drvdata->config;
1560
1561 if (kstrtoul(buf, 16, &val))
1562 return -EINVAL;
1563 if (val > ETM_CNTR_MAX_VAL)
1564 return -EINVAL;
1565
1566 spin_lock(&drvdata->spinlock);
1567 idx = config->cntr_idx;
1568 config->cntrldvr[idx] = val;
1569 spin_unlock(&drvdata->spinlock);
1570 return size;
1571 }
1572 static DEVICE_ATTR_RW(cntrldvr);
1573
static ssize_t cntr_val_show(struct device *dev,
1575 struct device_attribute *attr,
1576 char *buf)
1577 {
1578 u8 idx;
1579 unsigned long val;
1580 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1581 struct etmv4_config *config = &drvdata->config;
1582
1583 spin_lock(&drvdata->spinlock);
1584 idx = config->cntr_idx;
1585 val = config->cntr_val[idx];
1586 spin_unlock(&drvdata->spinlock);
1587 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1588 }
1589
static ssize_t cntr_val_store(struct device *dev,
1591 struct device_attribute *attr,
1592 const char *buf, size_t size)
1593 {
1594 u8 idx;
1595 unsigned long val;
1596 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1597 struct etmv4_config *config = &drvdata->config;
1598
1599 if (kstrtoul(buf, 16, &val))
1600 return -EINVAL;
1601 if (val > ETM_CNTR_MAX_VAL)
1602 return -EINVAL;
1603
1604 spin_lock(&drvdata->spinlock);
1605 idx = config->cntr_idx;
1606 config->cntr_val[idx] = val;
1607 spin_unlock(&drvdata->spinlock);
1608 return size;
1609 }
1610 static DEVICE_ATTR_RW(cntr_val);
1611
static ssize_t cntr_ctrl_show(struct device *dev,
1613 struct device_attribute *attr,
1614 char *buf)
1615 {
1616 u8 idx;
1617 unsigned long val;
1618 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1619 struct etmv4_config *config = &drvdata->config;
1620
1621 spin_lock(&drvdata->spinlock);
1622 idx = config->cntr_idx;
1623 val = config->cntr_ctrl[idx];
1624 spin_unlock(&drvdata->spinlock);
1625 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1626 }
1627
static ssize_t cntr_ctrl_store(struct device *dev,
1629 struct device_attribute *attr,
1630 const char *buf, size_t size)
1631 {
1632 u8 idx;
1633 unsigned long val;
1634 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1635 struct etmv4_config *config = &drvdata->config;
1636
1637 if (kstrtoul(buf, 16, &val))
1638 return -EINVAL;
1639
1640 spin_lock(&drvdata->spinlock);
1641 idx = config->cntr_idx;
1642 config->cntr_ctrl[idx] = val;
1643 spin_unlock(&drvdata->spinlock);
1644 return size;
1645 }
1646 static DEVICE_ATTR_RW(cntr_ctrl);
1647
static ssize_t res_idx_show(struct device *dev,
1649 struct device_attribute *attr,
1650 char *buf)
1651 {
1652 unsigned long val;
1653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1654 struct etmv4_config *config = &drvdata->config;
1655
1656 val = config->res_idx;
1657 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1658 }
1659
static ssize_t res_idx_store(struct device *dev,
1661 struct device_attribute *attr,
1662 const char *buf, size_t size)
1663 {
1664 unsigned long val;
1665 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1666 struct etmv4_config *config = &drvdata->config;
1667
1668 if (kstrtoul(buf, 16, &val))
1669 return -EINVAL;
/*
 * Resource selector pair 0 is always implemented and reserved,
 * i.e. an idx of 0 or 1 is not allowed.
 */
1674 if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1675 return -EINVAL;
1676
1677 /*
1678 * Use spinlock to ensure index doesn't change while it gets
1679 * dereferenced multiple times within a spinlock block elsewhere.
1680 */
1681 spin_lock(&drvdata->spinlock);
1682 config->res_idx = val;
1683 spin_unlock(&drvdata->spinlock);
1684 return size;
1685 }
1686 static DEVICE_ATTR_RW(res_idx);
1687
static ssize_t res_ctrl_show(struct device *dev,
1689 struct device_attribute *attr,
1690 char *buf)
1691 {
1692 u8 idx;
1693 unsigned long val;
1694 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1695 struct etmv4_config *config = &drvdata->config;
1696
1697 spin_lock(&drvdata->spinlock);
1698 idx = config->res_idx;
1699 val = config->res_ctrl[idx];
1700 spin_unlock(&drvdata->spinlock);
1701 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1702 }
1703
static ssize_t res_ctrl_store(struct device *dev,
1705 struct device_attribute *attr,
1706 const char *buf, size_t size)
1707 {
1708 u8 idx;
1709 unsigned long val;
1710 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1711 struct etmv4_config *config = &drvdata->config;
1712
1713 if (kstrtoul(buf, 16, &val))
1714 return -EINVAL;
1715
1716 spin_lock(&drvdata->spinlock);
1717 idx = config->res_idx;
/* For an odd idx the pair inversion bit is RES0 */
1719 if (idx % 2 != 0)
1720 /* PAIRINV, bit[21] */
1721 val &= ~BIT(21);
1722 config->res_ctrl[idx] = val & GENMASK(21, 0);
1723 spin_unlock(&drvdata->spinlock);
1724 return size;
1725 }
1726 static DEVICE_ATTR_RW(res_ctrl);
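/*
 * Illustrative only: res_ctrl caches the value destined for the selected
 * TRCRSCTLRn. Only bits [21:0] are kept, and for an odd numbered selector
 * PAIRINV (bit 21) is cleared as well since it is RES0 there. For instance,
 * writing 0x300006 while res_idx is 3 is stored as 0x100006.
 */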
1727
1728 static ssize_t sshot_idx_show(struct device *dev,
1729 struct device_attribute *attr, char *buf)
1730 {
1731 unsigned long val;
1732 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1733 struct etmv4_config *config = &drvdata->config;
1734
1735 val = config->ss_idx;
1736 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1737 }
1738
1739 static ssize_t sshot_idx_store(struct device *dev,
1740 struct device_attribute *attr,
1741 const char *buf, size_t size)
1742 {
1743 unsigned long val;
1744 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1745 struct etmv4_config *config = &drvdata->config;
1746
1747 if (kstrtoul(buf, 16, &val))
1748 return -EINVAL;
1749 if (val >= drvdata->nr_ss_cmp)
1750 return -EINVAL;
1751
1752 spin_lock(&drvdata->spinlock);
1753 config->ss_idx = val;
1754 spin_unlock(&drvdata->spinlock);
1755 return size;
1756 }
1757 static DEVICE_ATTR_RW(sshot_idx);
1758
1759 static ssize_t sshot_ctrl_show(struct device *dev,
1760 struct device_attribute *attr,
1761 char *buf)
1762 {
1763 unsigned long val;
1764 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1765 struct etmv4_config *config = &drvdata->config;
1766
1767 spin_lock(&drvdata->spinlock);
1768 val = config->ss_ctrl[config->ss_idx];
1769 spin_unlock(&drvdata->spinlock);
1770 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1771 }
1772
1773 static ssize_t sshot_ctrl_store(struct device *dev,
1774 struct device_attribute *attr,
1775 const char *buf, size_t size)
1776 {
1777 u8 idx;
1778 unsigned long val;
1779 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1780 struct etmv4_config *config = &drvdata->config;
1781
1782 if (kstrtoul(buf, 16, &val))
1783 return -EINVAL;
1784
1785 spin_lock(&drvdata->spinlock);
1786 idx = config->ss_idx;
1787 config->ss_ctrl[idx] = val & GENMASK(24, 0);
1788 /* must clear bit 31 in related status register on programming */
1789 config->ss_status[idx] &= ~BIT(31);
1790 spin_unlock(&drvdata->spinlock);
1791 return size;
1792 }
1793 static DEVICE_ATTR_RW(sshot_ctrl);
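/*
 * Illustrative only: single-shot control follows the same index pattern.
 * A comparator is selected with sshot_idx (bounded by nr_ss_cmp) and its
 * control value written through sshot_ctrl; only bits [24:0] are kept and
 * bit 31 of the matching status word is cleared, as required when the
 * comparator is (re)programmed.
 */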
1794
1795 static ssize_t sshot_status_show(struct device *dev,
1796 struct device_attribute *attr, char *buf)
1797 {
1798 unsigned long val;
1799 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1800 struct etmv4_config *config = &drvdata->config;
1801
1802 spin_lock(&drvdata->spinlock);
1803 val = config->ss_status[config->ss_idx];
1804 spin_unlock(&drvdata->spinlock);
1805 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1806 }
1807 static DEVICE_ATTR_RO(sshot_status);
1808
1809 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1810 struct device_attribute *attr,
1811 char *buf)
1812 {
1813 unsigned long val;
1814 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1815 struct etmv4_config *config = &drvdata->config;
1816
1817 spin_lock(&drvdata->spinlock);
1818 val = config->ss_pe_cmp[config->ss_idx];
1819 spin_unlock(&drvdata->spinlock);
1820 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1821 }
1822
1823 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1824 struct device_attribute *attr,
1825 const char *buf, size_t size)
1826 {
1827 u8 idx;
1828 unsigned long val;
1829 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1830 struct etmv4_config *config = &drvdata->config;
1831
1832 if (kstrtoul(buf, 16, &val))
1833 return -EINVAL;
1834
1835 spin_lock(&drvdata->spinlock);
1836 idx = config->ss_idx;
1837 config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
1838 /* must clear bit 31 in related status register on programming */
1839 config->ss_status[idx] &= ~BIT(31);
1840 spin_unlock(&drvdata->spinlock);
1841 return size;
1842 }
1843 static DEVICE_ATTR_RW(sshot_pe_ctrl);
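/*
 * Illustrative only: sshot_pe_ctrl follows the same pattern but keeps just
 * bits [7:0] (used for the PE comparator input selection) and likewise
 * clears bit 31 of the related status word for the selected single-shot
 * comparator.
 */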
1844
1845 static ssize_t ctxid_idx_show(struct device *dev,
1846 struct device_attribute *attr,
1847 char *buf)
1848 {
1849 unsigned long val;
1850 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1851 struct etmv4_config *config = &drvdata->config;
1852
1853 val = config->ctxid_idx;
1854 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1855 }
1856
1857 static ssize_t ctxid_idx_store(struct device *dev,
1858 struct device_attribute *attr,
1859 const char *buf, size_t size)
1860 {
1861 unsigned long val;
1862 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1863 struct etmv4_config *config = &drvdata->config;
1864
1865 if (kstrtoul(buf, 16, &val))
1866 return -EINVAL;
1867 if (val >= drvdata->numcidc)
1868 return -EINVAL;
1869
1870 /*
1871 * Use spinlock to ensure index doesn't change while it gets
1872 * dereferenced multiple times within a spinlock block elsewhere.
1873 */
1874 spin_lock(&drvdata->spinlock);
1875 config->ctxid_idx = val;
1876 spin_unlock(&drvdata->spinlock);
1877 return size;
1878 }
1879 static DEVICE_ATTR_RW(ctxid_idx);
1880
1881 static ssize_t ctxid_pid_show(struct device *dev,
1882 struct device_attribute *attr,
1883 char *buf)
1884 {
1885 u8 idx;
1886 unsigned long val;
1887 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1888 struct etmv4_config *config = &drvdata->config;
1889
1890 /*
1891 * Don't use contextID tracing if coming from a PID namespace. See
1892 * comment in ctxid_pid_store().
1893 */
1894 if (task_active_pid_ns(current) != &init_pid_ns)
1895 return -EINVAL;
1896
1897 spin_lock(&drvdata->spinlock);
1898 idx = config->ctxid_idx;
1899 val = (unsigned long)config->ctxid_pid[idx];
1900 spin_unlock(&drvdata->spinlock);
1901 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1902 }
1903
1904 static ssize_t ctxid_pid_store(struct device *dev,
1905 struct device_attribute *attr,
1906 const char *buf, size_t size)
1907 {
1908 u8 idx;
1909 unsigned long pid;
1910 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1911 struct etmv4_config *config = &drvdata->config;
1912
1913 /*
1914 * When contextID tracing is enabled the tracers will insert the
1915 * value found in the contextID register in the trace stream. But if
1916 * a process is in a namespace the PID of that process as seen from the
1917 * namespace won't be what the kernel sees, something that makes the
1918 * feature confusing and can potentially leak kernel only information.
1919 * As such refuse to use the feature if @current is not in the initial
1920 * PID namespace.
1921 */
1922 if (task_active_pid_ns(current) != &init_pid_ns)
1923 return -EINVAL;
1924
1925 /*
1926 * only implemented when ctxid tracing is enabled, i.e. at least one
1927 * ctxid comparator is implemented and ctxid is greater than 0 bits
1928 * in length
1929 */
1930 if (!drvdata->ctxid_size || !drvdata->numcidc)
1931 return -EINVAL;
1932 if (kstrtoul(buf, 16, &pid))
1933 return -EINVAL;
1934
1935 spin_lock(&drvdata->spinlock);
1936 idx = config->ctxid_idx;
1937 config->ctxid_pid[idx] = (u64)pid;
1938 spin_unlock(&drvdata->spinlock);
1939 return size;
1940 }
1941 static DEVICE_ATTR_RW(ctxid_pid);
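/*
 * Illustrative usage only: a context ID comparator is selected with
 * ctxid_idx and its compare value programmed through ctxid_pid, e.g.
 *
 *   echo 0x0 > ctxid_idx
 *   echo 0x1bc5 > ctxid_pid   # comparator 0 matches context ID 0x1bc5
 *
 * Reads and writes of ctxid_pid (and ctxid_masks below) return -EINVAL when
 * the caller is not in the initial PID namespace, as explained in
 * ctxid_pid_store().
 */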
1942
1943 static ssize_t ctxid_masks_show(struct device *dev,
1944 struct device_attribute *attr,
1945 char *buf)
1946 {
1947 unsigned long val1, val2;
1948 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1949 struct etmv4_config *config = &drvdata->config;
1950
1951 /*
1952 * Don't use contextID tracing if coming from a PID namespace. See
1953 * comment in ctxid_pid_store().
1954 */
1955 if (task_active_pid_ns(current) != &init_pid_ns)
1956 return -EINVAL;
1957
1958 spin_lock(&drvdata->spinlock);
1959 val1 = config->ctxid_mask0;
1960 val2 = config->ctxid_mask1;
1961 spin_unlock(&drvdata->spinlock);
1962 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1963 }
1964
1965 static ssize_t ctxid_masks_store(struct device *dev,
1966 struct device_attribute *attr,
1967 const char *buf, size_t size)
1968 {
1969 u8 i, j, maskbyte;
1970 unsigned long val1, val2, mask;
1971 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1972 struct etmv4_config *config = &drvdata->config;
1973 int nr_inputs;
1974
1975 /*
1976 * Don't use contextID tracing if coming from a PID namespace. See
1977 * comment in ctxid_pid_store().
1978 */
1979 if (task_active_pid_ns(current) != &init_pid_ns)
1980 return -EINVAL;
1981
1982 /*
1983 * only implemented when ctxid tracing is enabled, i.e. at least one
1984 * ctxid comparator is implemented and ctxid is greater than 0 bits
1985 * in length
1986 */
1987 if (!drvdata->ctxid_size || !drvdata->numcidc)
1988 return -EINVAL;
1989 /* one mask if <= 4 comparators, two for up to 8 */
1990 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
1991 if ((drvdata->numcidc > 4) && (nr_inputs != 2))
1992 return -EINVAL;
1993
1994 spin_lock(&drvdata->spinlock);
1995 /*
1996 * each byte[0..3] controls mask value applied to ctxid
1997 * comparator[0..3]
1998 */
1999 switch (drvdata->numcidc) {
2000 case 0x1:
2001 /* COMP0, bits[7:0] */
2002 config->ctxid_mask0 = val1 & 0xFF;
2003 break;
2004 case 0x2:
2005 /* COMP1, bits[15:8] */
2006 config->ctxid_mask0 = val1 & 0xFFFF;
2007 break;
2008 case 0x3:
2009 /* COMP2, bits[23:16] */
2010 config->ctxid_mask0 = val1 & 0xFFFFFF;
2011 break;
2012 case 0x4:
2013 /* COMP3, bits[31:24] */
2014 config->ctxid_mask0 = val1;
2015 break;
2016 case 0x5:
2017 /* COMP4, bits[7:0] */
2018 config->ctxid_mask0 = val1;
2019 config->ctxid_mask1 = val2 & 0xFF;
2020 break;
2021 case 0x6:
2022 /* COMP5, bits[15:8] */
2023 config->ctxid_mask0 = val1;
2024 config->ctxid_mask1 = val2 & 0xFFFF;
2025 break;
2026 case 0x7:
2027 /* COMP6, bits[23:16] */
2028 config->ctxid_mask0 = val1;
2029 config->ctxid_mask1 = val2 & 0xFFFFFF;
2030 break;
2031 case 0x8:
2032 /* COMP7, bits[31:24] */
2033 config->ctxid_mask0 = val1;
2034 config->ctxid_mask1 = val2;
2035 break;
2036 default:
2037 break;
2038 }
2039 /*
2040 * If software sets a mask bit to 1, it must program the relevant byte of
2041 * the ctxid comparator value to 0x0, otherwise behavior is unpredictable.
2042 * For example, if bit[3] of ctxid_mask0 (the mask byte for comparator 0) is
2043 * 1, we must clear bits[31:24], i.e. byte 3, of the ctxid comparator 0 value register.
2044 */
2045 mask = config->ctxid_mask0;
2046 for (i = 0; i < drvdata->numcidc; i++) {
2047 /* mask value of corresponding ctxid comparator */
2048 maskbyte = mask & ETMv4_EVENT_MASK;
2049 /*
2050 * each bit corresponds to a byte of respective ctxid comparator
2051 * value register
2052 */
2053 for (j = 0; j < 8; j++) {
2054 if (maskbyte & 1)
2055 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2056 maskbyte >>= 1;
2057 }
2058 /* Select the next ctxid comparator mask value */
2059 if (i == 3)
2060 /* ctxid comparators[4-7] */
2061 mask = config->ctxid_mask1;
2062 else
2063 mask >>= 0x8;
2064 }
2065
2066 spin_unlock(&drvdata->spinlock);
2067 return size;
2068 }
2069 static DEVICE_ATTR_RW(ctxid_masks);
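/*
 * Worked example (illustrative only): each byte of ctxid_mask0/1 is the mask
 * for one comparator, and each bit within that byte masks out one byte of
 * the comparator value. Writing "0x1 0x0" sets bit 0 of the mask byte for
 * comparator 0, so the loop above clears bits [7:0] of ctxid_pid[0] to keep
 * the cached value consistent with its mask.
 */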
2070
2071 static ssize_t vmid_idx_show(struct device *dev,
2072 struct device_attribute *attr,
2073 char *buf)
2074 {
2075 unsigned long val;
2076 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2077 struct etmv4_config *config = &drvdata->config;
2078
2079 val = config->vmid_idx;
2080 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2081 }
2082
2083 static ssize_t vmid_idx_store(struct device *dev,
2084 struct device_attribute *attr,
2085 const char *buf, size_t size)
2086 {
2087 unsigned long val;
2088 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2089 struct etmv4_config *config = &drvdata->config;
2090
2091 if (kstrtoul(buf, 16, &val))
2092 return -EINVAL;
2093 if (val >= drvdata->numvmidc)
2094 return -EINVAL;
2095
2096 /*
2097 * Use spinlock to ensure index doesn't change while it gets
2098 * dereferenced multiple times within a spinlock block elsewhere.
2099 */
2100 spin_lock(&drvdata->spinlock);
2101 config->vmid_idx = val;
2102 spin_unlock(&drvdata->spinlock);
2103 return size;
2104 }
2105 static DEVICE_ATTR_RW(vmid_idx);
2106
2107 static ssize_t vmid_val_show(struct device *dev,
2108 struct device_attribute *attr,
2109 char *buf)
2110 {
2111 unsigned long val;
2112 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2113 struct etmv4_config *config = &drvdata->config;
2114
2115 val = (unsigned long)config->vmid_val[config->vmid_idx];
2116 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2117 }
2118
2119 static ssize_t vmid_val_store(struct device *dev,
2120 struct device_attribute *attr,
2121 const char *buf, size_t size)
2122 {
2123 unsigned long val;
2124 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2125 struct etmv4_config *config = &drvdata->config;
2126
2127 /*
2128 * only implemented when vmid tracing is enabled, i.e. at least one
2129 * vmid comparator is implemented and at least 8 bit vmid size
2130 */
2131 if (!drvdata->vmid_size || !drvdata->numvmidc)
2132 return -EINVAL;
2133 if (kstrtoul(buf, 16, &val))
2134 return -EINVAL;
2135
2136 spin_lock(&drvdata->spinlock);
2137 config->vmid_val[config->vmid_idx] = (u64)val;
2138 spin_unlock(&drvdata->spinlock);
2139 return size;
2140 }
2141 static DEVICE_ATTR_RW(vmid_val);
2142
2143 static ssize_t vmid_masks_show(struct device *dev,
2144 struct device_attribute *attr, char *buf)
2145 {
2146 unsigned long val1, val2;
2147 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2148 struct etmv4_config *config = &drvdata->config;
2149
2150 spin_lock(&drvdata->spinlock);
2151 val1 = config->vmid_mask0;
2152 val2 = config->vmid_mask1;
2153 spin_unlock(&drvdata->spinlock);
2154 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2155 }
2156
2157 static ssize_t vmid_masks_store(struct device *dev,
2158 struct device_attribute *attr,
2159 const char *buf, size_t size)
2160 {
2161 u8 i, j, maskbyte;
2162 unsigned long val1, val2, mask;
2163 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2164 struct etmv4_config *config = &drvdata->config;
2165 int nr_inputs;
2166
2167 /*
2168 * only implemented when vmid tracing is enabled, i.e. at least one
2169 * vmid comparator is implemented and at least 8 bit vmid size
2170 */
2171 if (!drvdata->vmid_size || !drvdata->numvmidc)
2172 return -EINVAL;
2173 /* one mask if <= 4 comparators, two for up to 8 */
2174 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2175 if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2176 return -EINVAL;
2177
2178 spin_lock(&drvdata->spinlock);
2179
2180 /*
2181 * each byte[0..3] controls mask value applied to vmid
2182 * comparator[0..3]
2183 */
2184 switch (drvdata->numvmidc) {
2185 case 0x1:
2186 /* COMP0, bits[7:0] */
2187 config->vmid_mask0 = val1 & 0xFF;
2188 break;
2189 case 0x2:
2190 /* COMP1, bits[15:8] */
2191 config->vmid_mask0 = val1 & 0xFFFF;
2192 break;
2193 case 0x3:
2194 /* COMP2, bits[23:16] */
2195 config->vmid_mask0 = val1 & 0xFFFFFF;
2196 break;
2197 case 0x4:
2198 /* COMP3, bits[31:24] */
2199 config->vmid_mask0 = val1;
2200 break;
2201 case 0x5:
2202 /* COMP4, bits[7:0] */
2203 config->vmid_mask0 = val1;
2204 config->vmid_mask1 = val2 & 0xFF;
2205 break;
2206 case 0x6:
2207 /* COMP5, bits[15:8] */
2208 config->vmid_mask0 = val1;
2209 config->vmid_mask1 = val2 & 0xFFFF;
2210 break;
2211 case 0x7:
2212 /* COMP6, bits[23:16] */
2213 config->vmid_mask0 = val1;
2214 config->vmid_mask1 = val2 & 0xFFFFFF;
2215 break;
2216 case 0x8:
2217 /* COMP7, bits[31:24] */
2218 config->vmid_mask0 = val1;
2219 config->vmid_mask1 = val2;
2220 break;
2221 default:
2222 break;
2223 }
2224
2225 /*
2226 * If software sets a mask bit to 1, it must program the relevant byte of
2227 * the vmid comparator value to 0x0, otherwise behavior is unpredictable.
2228 * For example, if bit[3] of vmid_mask0 (the mask byte for comparator 0) is
2229 * 1, we must clear bits[31:24], i.e. byte 3, of the vmid comparator 0 value register.
2230 */
2231 mask = config->vmid_mask0;
2232 for (i = 0; i < drvdata->numvmidc; i++) {
2233 /* mask value of corresponding vmid comparator */
2234 maskbyte = mask & ETMv4_EVENT_MASK;
2235 /*
2236 * each bit corresponds to a byte of respective vmid comparator
2237 * value register
2238 */
2239 for (j = 0; j < 8; j++) {
2240 if (maskbyte & 1)
2241 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2242 maskbyte >>= 1;
2243 }
2244 /* Select the next vmid comparator mask value */
2245 if (i == 3)
2246 /* vmid comparators[4-7] */
2247 mask = config->vmid_mask1;
2248 else
2249 mask >>= 0x8;
2250 }
2251 spin_unlock(&drvdata->spinlock);
2252 return size;
2253 }
2254 static DEVICE_ATTR_RW(vmid_masks);
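/*
 * Illustrative note: the VMID mask handling mirrors ctxid_masks_store()
 * above; e.g. writing "0xff 0x0" masks out every byte of vmid comparator 0,
 * and the corresponding bytes of vmid_val[0] are cleared to match.
 */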
2255
2256 static ssize_t cpu_show(struct device *dev,
2257 struct device_attribute *attr, char *buf)
2258 {
2259 int val;
2260 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2261
2262 val = drvdata->cpu;
2263 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2264
2265 }
2266 static DEVICE_ATTR_RO(cpu);
2267
2268 static struct attribute *coresight_etmv4_attrs[] = {
2269 &dev_attr_nr_pe_cmp.attr,
2270 &dev_attr_nr_addr_cmp.attr,
2271 &dev_attr_nr_cntr.attr,
2272 &dev_attr_nr_ext_inp.attr,
2273 &dev_attr_numcidc.attr,
2274 &dev_attr_numvmidc.attr,
2275 &dev_attr_nrseqstate.attr,
2276 &dev_attr_nr_resource.attr,
2277 &dev_attr_nr_ss_cmp.attr,
2278 &dev_attr_reset.attr,
2279 &dev_attr_mode.attr,
2280 &dev_attr_pe.attr,
2281 &dev_attr_event.attr,
2282 &dev_attr_event_instren.attr,
2283 &dev_attr_event_ts.attr,
2284 &dev_attr_syncfreq.attr,
2285 &dev_attr_cyc_threshold.attr,
2286 &dev_attr_bb_ctrl.attr,
2287 &dev_attr_event_vinst.attr,
2288 &dev_attr_s_exlevel_vinst.attr,
2289 &dev_attr_ns_exlevel_vinst.attr,
2290 &dev_attr_addr_idx.attr,
2291 &dev_attr_addr_instdatatype.attr,
2292 &dev_attr_addr_single.attr,
2293 &dev_attr_addr_range.attr,
2294 &dev_attr_addr_start.attr,
2295 &dev_attr_addr_stop.attr,
2296 &dev_attr_addr_ctxtype.attr,
2297 &dev_attr_addr_context.attr,
2298 &dev_attr_addr_exlevel_s_ns.attr,
2299 &dev_attr_addr_cmp_view.attr,
2300 &dev_attr_vinst_pe_cmp_start_stop.attr,
2301 &dev_attr_sshot_idx.attr,
2302 &dev_attr_sshot_ctrl.attr,
2303 &dev_attr_sshot_pe_ctrl.attr,
2304 &dev_attr_sshot_status.attr,
2305 &dev_attr_seq_idx.attr,
2306 &dev_attr_seq_state.attr,
2307 &dev_attr_seq_event.attr,
2308 &dev_attr_seq_reset_event.attr,
2309 &dev_attr_cntr_idx.attr,
2310 &dev_attr_cntrldvr.attr,
2311 &dev_attr_cntr_val.attr,
2312 &dev_attr_cntr_ctrl.attr,
2313 &dev_attr_res_idx.attr,
2314 &dev_attr_res_ctrl.attr,
2315 &dev_attr_ctxid_idx.attr,
2316 &dev_attr_ctxid_pid.attr,
2317 &dev_attr_ctxid_masks.attr,
2318 &dev_attr_vmid_idx.attr,
2319 &dev_attr_vmid_val.attr,
2320 &dev_attr_vmid_masks.attr,
2321 &dev_attr_cpu.attr,
2322 NULL,
2323 };
2324
2325 struct etmv4_reg {
2326 void __iomem *addr;
2327 u32 data;
2328 };
2329
2330 static void do_smp_cross_read(void *data)
2331 {
2332 struct etmv4_reg *reg = data;
2333
2334 reg->data = readl_relaxed(reg->addr);
2335 }
2336
2337 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2338 {
2339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2340 struct etmv4_reg reg;
2341
2342 reg.addr = drvdata->base + offset;
2343 /*
2344 * smp cross call ensures the CPU will be powered up before
2345 * accessing the ETMv4 trace core registers
2346 */
2347 smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2348 return reg.data;
2349 }
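/*
 * Illustrative note: the management attributes declared below with
 * coresight_etm4x_cross_read() go through this helper, so reading e.g. the
 * trcidr0 file runs do_smp_cross_read() on drvdata->cpu and returns the
 * TRCIDR0 value observed there, rather than reading from a calling CPU whose
 * trace unit may not be powered.
 */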
2350
2351 #define coresight_etm4x_reg(name, offset) \
2352 coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2353
2354 #define coresight_etm4x_cross_read(name, offset) \
2355 coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
2356 name, offset)
2357
2358 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2359 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2360 coresight_etm4x_reg(trclsr, TRCLSR);
2361 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2362 coresight_etm4x_reg(trcdevid, TRCDEVID);
2363 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2364 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2365 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2366 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2367 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2368 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2369 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2370 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2371
2372 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2373 &dev_attr_trcoslsr.attr,
2374 &dev_attr_trcpdcr.attr,
2375 &dev_attr_trcpdsr.attr,
2376 &dev_attr_trclsr.attr,
2377 &dev_attr_trcconfig.attr,
2378 &dev_attr_trctraceid.attr,
2379 &dev_attr_trcauthstatus.attr,
2380 &dev_attr_trcdevid.attr,
2381 &dev_attr_trcdevtype.attr,
2382 &dev_attr_trcpidr0.attr,
2383 &dev_attr_trcpidr1.attr,
2384 &dev_attr_trcpidr2.attr,
2385 &dev_attr_trcpidr3.attr,
2386 NULL,
2387 };
2388
2389 coresight_etm4x_cross_read(trcidr0, TRCIDR0);
2390 coresight_etm4x_cross_read(trcidr1, TRCIDR1);
2391 coresight_etm4x_cross_read(trcidr2, TRCIDR2);
2392 coresight_etm4x_cross_read(trcidr3, TRCIDR3);
2393 coresight_etm4x_cross_read(trcidr4, TRCIDR4);
2394 coresight_etm4x_cross_read(trcidr5, TRCIDR5);
2395 /* trcidr[6,7] are reserved */
2396 coresight_etm4x_cross_read(trcidr8, TRCIDR8);
2397 coresight_etm4x_cross_read(trcidr9, TRCIDR9);
2398 coresight_etm4x_cross_read(trcidr10, TRCIDR10);
2399 coresight_etm4x_cross_read(trcidr11, TRCIDR11);
2400 coresight_etm4x_cross_read(trcidr12, TRCIDR12);
2401 coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2402
2403 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2404 &dev_attr_trcidr0.attr,
2405 &dev_attr_trcidr1.attr,
2406 &dev_attr_trcidr2.attr,
2407 &dev_attr_trcidr3.attr,
2408 &dev_attr_trcidr4.attr,
2409 &dev_attr_trcidr5.attr,
2410 /* trcidr[6,7] are reserved */
2411 &dev_attr_trcidr8.attr,
2412 &dev_attr_trcidr9.attr,
2413 &dev_attr_trcidr10.attr,
2414 &dev_attr_trcidr11.attr,
2415 &dev_attr_trcidr12.attr,
2416 &dev_attr_trcidr13.attr,
2417 NULL,
2418 };
2419
2420 static const struct attribute_group coresight_etmv4_group = {
2421 .attrs = coresight_etmv4_attrs,
2422 };
2423
2424 static const struct attribute_group coresight_etmv4_mgmt_group = {
2425 .attrs = coresight_etmv4_mgmt_attrs,
2426 .name = "mgmt",
2427 };
2428
2429 static const struct attribute_group coresight_etmv4_trcidr_group = {
2430 .attrs = coresight_etmv4_trcidr_attrs,
2431 .name = "trcidr",
2432 };
2433
2434 const struct attribute_group *coresight_etmv4_groups[] = {
2435 &coresight_etmv4_group,
2436 &coresight_etmv4_mgmt_group,
2437 &coresight_etmv4_trcidr_group,
2438 NULL,
2439 };
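/*
 * Illustrative note on the resulting layout (assuming these groups are the
 * ones handed to the coresight core at device registration): the unnamed
 * group lands directly in the device's sysfs directory, while the "mgmt" and
 * "trcidr" groups appear as mgmt/ and trcidr/ subdirectories holding the raw
 * management and ID register reads.
 */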
2440