/*
 * Intel Cache Quality-of-Service Monitoring (CQM) support.
 *
 * Based very, very heavily on work by Peter Zijlstra.
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include "perf_event.h"

#define MSR_IA32_PQR_ASSOC	0x0c8f
#define MSR_IA32_QM_CTR		0x0c8e
#define MSR_IA32_QM_EVTSEL	0x0c8d

static u32 cqm_max_rmid = -1;
static unsigned int cqm_l3_scale; /* supposedly cacheline size */

/**
 * struct intel_pqr_state - State cache for the PQR MSR
 * @rmid:		The cached Resource Monitoring ID
 * @closid:		The cached Class Of Service ID
 * @rmid_usecnt:	The usage counter for rmid
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
 * contains both parts, so we need to cache them.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change.
 */
struct intel_pqr_state {
	u32			rmid;
	u32			closid;
	int			rmid_usecnt;
};
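
/*
 * Illustrative sketch, not driver code: because the RMID lives in the
 * low bits of MSR_IA32_PQR_ASSOC and the CLOSID in the upper 32 bits,
 * an update must always supply both cached halves, e.g.:
 *
 *	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 *
 *	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
 *
 * This is exactly the pattern used by intel_cqm_event_start() and
 * intel_cqm_event_stop() below.
 */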

/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Both functions which modify the state
 * (intel_cqm_event_start and intel_cqm_event_stop) are called with
 * interrupts disabled, which is sufficient for the protection.
 */
static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);

/*
 * Protects cache_groups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
 * Also protects event->hw.cqm_rmid
 *
 * Hold either for stability, both for modification of ->hw.cqm_rmid.
 */
static DEFINE_MUTEX(cache_mutex);
static DEFINE_RAW_SPINLOCK(cache_lock);

/*
 * Groups of events that have the same target(s), one RMID per group.
 */
static LIST_HEAD(cache_groups);

/*
 * Mask of CPUs for reading CQM values. We only need one per-socket.
 */
static cpumask_t cqm_cpumask;

#define RMID_VAL_ERROR		(1ULL << 63)
#define RMID_VAL_UNAVAIL	(1ULL << 62)

#define QOS_L3_OCCUP_EVENT_ID	(1 << 0)

#define QOS_EVENT_MASK		QOS_L3_OCCUP_EVENT_ID

/*
 * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
 *
 * This rmid is always free and is guaranteed to have an associated
 * near-zero occupancy value, i.e. no cachelines are tagged with this
 * RMID, once __intel_cqm_rmid_rotate() returns.
 */
static u32 intel_cqm_rotation_rmid;

#define INVALID_RMID		(-1)

/*
 * Is @rmid valid for programming the hardware?
 *
 * rmid 0 is reserved by the hardware for all non-monitored tasks, which
 * means that we should never come across an rmid with that value.
 * Likewise, an rmid value of -1 is used to indicate "no rmid currently
 * assigned" and is used as part of the rotation code.
 */
static inline bool __rmid_valid(u32 rmid)
{
	if (!rmid || rmid == INVALID_RMID)
		return false;

	return true;
}

static u64 __rmid_read(u32 rmid)
{
	u64 val;

	/*
	 * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
	 * it just says that to increase confusion.
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
	rdmsrl(MSR_IA32_QM_CTR, val);

	/*
	 * Aside from the ERROR and UNAVAIL bits, assume this thing returns
	 * the number of cachelines tagged with @rmid.
	 */
	return val;
}
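
/*
 * Worked example, an assumption based on the comment above: the raw
 * counter value is in cachelines, so occupancy in bytes is obtained by
 * scaling with the CPUID-reported factor cached in cqm_l3_scale:
 *
 *	u64 val = __rmid_read(rmid);
 *
 *	if (!(val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)))
 *		bytes = val * cqm_l3_scale;
 *
 * The driver itself leaves this scaling to userspace via the
 * llc_occupancy.scale event attribute set up in intel_cqm_init().
 */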
116
117 enum rmid_recycle_state {
118 RMID_YOUNG = 0,
119 RMID_AVAILABLE,
120 RMID_DIRTY,
121 };
122
123 struct cqm_rmid_entry {
124 u32 rmid;
125 enum rmid_recycle_state state;
126 struct list_head list;
127 unsigned long queue_time;
128 };
129
130 /*
131 * cqm_rmid_free_lru - A least recently used list of RMIDs.
132 *
133 * Oldest entry at the head, newest (most recently used) entry at the
134 * tail. This list is never traversed, it's only used to keep track of
135 * the lru order. That is, we only pick entries of the head or insert
136 * them on the tail.
137 *
138 * All entries on the list are 'free', and their RMIDs are not currently
139 * in use. To mark an RMID as in use, remove its entry from the lru
140 * list.
141 *
142 *
143 * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
144 *
145 * This list is contains RMIDs that no one is currently using but that
146 * may have a non-zero occupancy value associated with them. The
147 * rotation worker moves RMIDs from the limbo list to the free list once
148 * the occupancy value drops below __intel_cqm_threshold.
149 *
150 * Both lists are protected by cache_mutex.
151 */
152 static LIST_HEAD(cqm_rmid_free_lru);
153 static LIST_HEAD(cqm_rmid_limbo_lru);
154
155 /*
156 * We use a simple array of pointers so that we can lookup a struct
157 * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid()
158 * and __put_rmid() from having to worry about dealing with struct
159 * cqm_rmid_entry - they just deal with rmids, i.e. integers.
160 *
161 * Once this array is initialized it is read-only. No locks are required
162 * to access it.
163 *
164 * All entries for all RMIDs can be looked up in the this array at all
165 * times.
166 */
167 static struct cqm_rmid_entry **cqm_rmid_ptrs;
168
__rmid_entry(u32 rmid)169 static inline struct cqm_rmid_entry *__rmid_entry(u32 rmid)
170 {
171 struct cqm_rmid_entry *entry;
172
173 entry = cqm_rmid_ptrs[rmid];
174 WARN_ON(entry->rmid != rmid);
175
176 return entry;
177 }
178
179 /*
180 * Returns < 0 on fail.
181 *
182 * We expect to be called with cache_mutex held.
183 */
__get_rmid(void)184 static u32 __get_rmid(void)
185 {
186 struct cqm_rmid_entry *entry;
187
188 lockdep_assert_held(&cache_mutex);
189
190 if (list_empty(&cqm_rmid_free_lru))
191 return INVALID_RMID;
192
193 entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
194 list_del(&entry->list);
195
196 return entry->rmid;
197 }
198
__put_rmid(u32 rmid)199 static void __put_rmid(u32 rmid)
200 {
201 struct cqm_rmid_entry *entry;
202
203 lockdep_assert_held(&cache_mutex);
204
205 WARN_ON(!__rmid_valid(rmid));
206 entry = __rmid_entry(rmid);
207
208 entry->queue_time = jiffies;
209 entry->state = RMID_YOUNG;
210
211 list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
212 }
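
/*
 * Illustrative usage sketch, not driver code: allocation and release
 * both run under cache_mutex, and a released RMID lands in limbo
 * rather than going straight back on the free list:
 *
 *	mutex_lock(&cache_mutex);
 *	rmid = __get_rmid();
 *	if (__rmid_valid(rmid)) {
 *		... program an event group with rmid ...
 *		__put_rmid(rmid);
 *	}
 *	mutex_unlock(&cache_mutex);
 *
 * Only the rotation worker moves entries from cqm_rmid_limbo_lru back
 * to cqm_rmid_free_lru, once their occupancy has decayed.
 */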

static void cqm_cleanup(void)
{
	int i;

	if (!cqm_rmid_ptrs)
		return;

	/* Entries run from 0 to cqm_max_rmid inclusive. */
	for (i = 0; i <= cqm_max_rmid; i++)
		kfree(cqm_rmid_ptrs[i]);

	kfree(cqm_rmid_ptrs);
	cqm_rmid_ptrs = NULL;
}

static int intel_cqm_setup_rmid_cache(void)
{
	struct cqm_rmid_entry *entry;
	unsigned int nr_rmids;
	int r = 0;

	nr_rmids = cqm_max_rmid + 1;
	cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
				nr_rmids, GFP_KERNEL);
	if (!cqm_rmid_ptrs)
		return -ENOMEM;

	for (; r <= cqm_max_rmid; r++) {
		struct cqm_rmid_entry *entry;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto fail;

		INIT_LIST_HEAD(&entry->list);
		entry->rmid = r;
		cqm_rmid_ptrs[r] = entry;

		list_add_tail(&entry->list, &cqm_rmid_free_lru);
	}

	/*
	 * RMID 0 is special and is always allocated. It's used for all
	 * tasks that are not monitored.
	 */
	entry = __rmid_entry(0);
	list_del(&entry->list);

	mutex_lock(&cache_mutex);
	intel_cqm_rotation_rmid = __get_rmid();
	mutex_unlock(&cache_mutex);

	return 0;

fail:
	cqm_cleanup();
	return -ENOMEM;
}

/*
 * Determine if @a and @b measure the same set of tasks.
 *
 * If @a and @b measure the same set of tasks then we want to share a
 * single RMID.
 */
static bool __match_event(struct perf_event *a, struct perf_event *b)
{
	/* Per-cpu and task events don't mix */
	if ((a->attach_state & PERF_ATTACH_TASK) !=
	    (b->attach_state & PERF_ATTACH_TASK))
		return false;

#ifdef CONFIG_CGROUP_PERF
	if (a->cgrp != b->cgrp)
		return false;
#endif

	/* If not task event, we're machine wide */
	if (!(b->attach_state & PERF_ATTACH_TASK))
		return true;

	/*
	 * Events that target the same task are placed into the same
	 * cache group. Mark it as a multi event group, so that we
	 * update ->count for every event rather than just the group
	 * leader later.
	 */
	if (a->hw.target == b->hw.target) {
		b->hw.is_group_event = true;
		return true;
	}

	/*
	 * Are we an inherited event?
	 */
	if (b->parent == a)
		return true;

	return false;
}

#ifdef CONFIG_CGROUP_PERF
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
	if (event->attach_state & PERF_ATTACH_TASK)
		return perf_cgroup_from_task(event->hw.target, event->ctx);

	return event->cgrp;
}
#endif

/*
 * Determine if @a's tasks intersect with @b's tasks
 *
 * There are combinations of events that we explicitly prohibit,
 *
 *		  PROHIBITS
 *   system-wide  ->  cgroup and task
 *   cgroup       ->  system-wide
 *                ->  task in cgroup
 *   task         ->  system-wide
 *                ->  task in cgroup
 *
 * Call this function before allocating an RMID.
 */
static bool __conflict_event(struct perf_event *a, struct perf_event *b)
{
#ifdef CONFIG_CGROUP_PERF
	/*
	 * We can have any number of cgroups but only one system-wide
	 * event at a time.
	 */
	if (a->cgrp && b->cgrp) {
		struct perf_cgroup *ac = a->cgrp;
		struct perf_cgroup *bc = b->cgrp;

		/*
		 * This condition should have been caught in
		 * __match_event() and we should be sharing an RMID.
		 */
		WARN_ON_ONCE(ac == bc);

		if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
		    cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
			return true;

		return false;
	}

	if (a->cgrp || b->cgrp) {
		struct perf_cgroup *ac, *bc;

		/*
		 * cgroup and system-wide events are mutually exclusive
		 */
		if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
		    (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
			return true;

		/*
		 * Ensure neither event is part of the other's cgroup
		 */
		ac = event_to_cgroup(a);
		bc = event_to_cgroup(b);
		if (ac == bc)
			return true;

		/*
		 * Must have cgroup and non-intersecting task events.
		 */
		if (!ac || !bc)
			return false;

		/*
		 * We have cgroup and task events, and the task belongs
		 * to a cgroup. Check for overlap.
		 */
		if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
		    cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
			return true;

		return false;
	}
#endif
	/*
	 * If one of them is not a task, same story as above with cgroups.
	 */
	if (!(a->attach_state & PERF_ATTACH_TASK) ||
	    !(b->attach_state & PERF_ATTACH_TASK))
		return true;

	/*
	 * Must be non-overlapping.
	 */
	return false;
}

struct rmid_read {
	u32 rmid;
	atomic64_t value;
};

static void __intel_cqm_event_count(void *info);

/*
 * Exchange the RMID of a group of events.
 */
static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
{
	struct perf_event *event;
	struct list_head *head = &group->hw.cqm_group_entry;
	u32 old_rmid = group->hw.cqm_rmid;

	lockdep_assert_held(&cache_mutex);

	/*
	 * If our RMID is being deallocated, perform a read now.
	 */
	if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
		struct rmid_read rr = {
			.value = ATOMIC64_INIT(0),
			.rmid = old_rmid,
		};

		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
				 &rr, 1);
		local64_set(&group->count, atomic64_read(&rr.value));
	}

	raw_spin_lock_irq(&cache_lock);

	group->hw.cqm_rmid = rmid;
	list_for_each_entry(event, head, hw.cqm_group_entry)
		event->hw.cqm_rmid = rmid;

	raw_spin_unlock_irq(&cache_lock);

	return old_rmid;
}

/*
 * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
 * cachelines are still tagged with RMIDs in limbo, we progressively
 * increment the threshold until we find an RMID in limbo with <=
 * __intel_cqm_threshold lines tagged. This is designed to mitigate the
 * problem where cachelines tagged with an RMID are not steadily being
 * evicted.
 *
 * On successful rotations we decrease the threshold back towards zero.
 *
 * __intel_cqm_max_threshold provides an upper bound on the threshold,
 * and is measured in bytes because it's exposed to userland.
 */
static unsigned int __intel_cqm_threshold;
static unsigned int __intel_cqm_max_threshold;
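
/*
 * Worked example of the units involved: __intel_cqm_max_threshold is
 * in bytes, the internal __intel_cqm_threshold in cachelines, and the
 * two are related via cqm_l3_scale exactly as computed in
 * __intel_cqm_rmid_rotate():
 *
 *	threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;
 *
 * e.g. assuming a 64 byte upscale factor, a 65536 byte maximum allows
 * the threshold to grow to 1024 cachelines.
 */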

/*
 * Test whether an RMID has a zero occupancy value on this cpu.
 */
static void intel_cqm_stable(void *arg)
{
	struct cqm_rmid_entry *entry;

	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
		if (entry->state != RMID_AVAILABLE)
			break;

		if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
			entry->state = RMID_DIRTY;
	}
}

/*
 * If we have group events waiting for an RMID that don't conflict with
 * events already running, assign @rmid.
 */
static bool intel_cqm_sched_in_event(u32 rmid)
{
	struct perf_event *leader, *event;

	lockdep_assert_held(&cache_mutex);

	leader = list_first_entry(&cache_groups, struct perf_event,
				  hw.cqm_groups_entry);
	event = leader;

	list_for_each_entry_continue(event, &cache_groups,
				     hw.cqm_groups_entry) {
		if (__rmid_valid(event->hw.cqm_rmid))
			continue;

		if (__conflict_event(event, leader))
			continue;

		intel_cqm_xchg_rmid(event, rmid);
		return true;
	}

	return false;
}

/*
 * Initially use this constant for both the limbo queue time and the
 * rotation timer interval, pmu::hrtimer_interval_ms.
 *
 * They don't need to be the same, but the two are related since if you
 * rotate faster than you recycle RMIDs, you may run out of available
 * RMIDs.
 */
#define RMID_DEFAULT_QUEUE_TIME 250	/* ms */

static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;

/*
 * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
 * @available: number of freeable RMIDs on the limbo list
 *
 * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
 * cachelines are tagged with those RMIDs. After this we can reuse them
 * and know that the current set of active RMIDs is stable.
 *
 * Return %true or %false depending on whether stabilization needs to be
 * reattempted.
 *
 * If we return %true then @available is updated to indicate the
 * number of RMIDs on the limbo list that have been queued for the
 * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
 * are above __intel_cqm_threshold.
 */
static bool intel_cqm_rmid_stabilize(unsigned int *available)
{
	struct cqm_rmid_entry *entry, *tmp;

	lockdep_assert_held(&cache_mutex);

	*available = 0;
	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
		unsigned long min_queue_time;
		unsigned long now = jiffies;

		/*
		 * We hold RMIDs placed into limbo for a minimum queue
		 * time. Before the minimum queue time has elapsed we do
		 * not recycle RMIDs.
		 *
		 * The reasoning is that until a sufficient time has
		 * passed since we stopped using an RMID, any RMID
		 * placed onto the limbo list will likely still have
		 * data tagged in the cache, which means we'll probably
		 * fail to recycle it anyway.
		 *
		 * We can save ourselves an expensive IPI by skipping
		 * any RMIDs that have not been queued for the minimum
		 * time.
		 */
		min_queue_time = entry->queue_time +
			msecs_to_jiffies(__rmid_queue_time_ms);

		if (time_after(min_queue_time, now))
			break;

		entry->state = RMID_AVAILABLE;
		(*available)++;
	}

	/*
	 * Fast return if none of the RMIDs on the limbo list have been
	 * sitting on the queue for the minimum queue time.
	 */
	if (!*available)
		return false;

	/*
	 * Test whether an RMID is free for each package.
	 */
	on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);

	list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
		/*
		 * Exhausted all RMIDs that have waited min queue time.
		 */
		if (entry->state == RMID_YOUNG)
			break;

		if (entry->state == RMID_DIRTY)
			continue;

		list_del(&entry->list);	/* remove from limbo */

		/*
		 * The rotation RMID gets priority if it's
		 * currently invalid. In which case, skip adding
		 * the RMID to the free lru.
		 */
		if (!__rmid_valid(intel_cqm_rotation_rmid)) {
			intel_cqm_rotation_rmid = entry->rmid;
			continue;
		}

		/*
		 * If we have groups waiting for RMIDs, hand
		 * them one now provided they don't conflict.
		 */
		if (intel_cqm_sched_in_event(entry->rmid))
			continue;

		/*
		 * Otherwise place it onto the free list.
		 */
		list_add_tail(&entry->list, &cqm_rmid_free_lru);
	}

	return __rmid_valid(intel_cqm_rotation_rmid);
}

/*
 * Pick a victim group and move it to the tail of the group list.
 * @next: The first group without an RMID
 */
static void __intel_cqm_pick_and_rotate(struct perf_event *next)
{
	struct perf_event *rotor;
	u32 rmid;

	lockdep_assert_held(&cache_mutex);

	rotor = list_first_entry(&cache_groups, struct perf_event,
				 hw.cqm_groups_entry);

	/*
	 * The group at the front of the list should always have a valid
	 * RMID. If it doesn't then no groups have RMIDs assigned and we
	 * don't need to rotate the list.
	 */
	if (next == rotor)
		return;

	rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
	__put_rmid(rmid);

	list_rotate_left(&cache_groups);
}

/*
 * Deallocate the RMIDs from any events that conflict with @event, and
 * place them on the back of the group list.
 */
static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
{
	struct perf_event *group, *g;
	u32 rmid;

	lockdep_assert_held(&cache_mutex);

	list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
		if (group == event)
			continue;

		rmid = group->hw.cqm_rmid;

		/*
		 * Skip events that don't have a valid RMID.
		 */
		if (!__rmid_valid(rmid))
			continue;

		/*
		 * No conflict? No problem! Leave the event alone.
		 */
		if (!__conflict_event(group, event))
			continue;

		intel_cqm_xchg_rmid(group, INVALID_RMID);
		__put_rmid(rmid);
	}
}

/*
 * Attempt to rotate the groups and assign new RMIDs.
 *
 * We rotate for two reasons,
 *   1. To handle the scheduling of conflicting events
 *   2. To recycle RMIDs
 *
 * Rotating RMIDs is complicated because the hardware doesn't give us
 * any clues.
 *
 * There's problems with the hardware interface; when you change the
 * task:RMID map cachelines retain their 'old' tags, giving a skewed
 * picture. In order to work around this, we must always keep one free
 * RMID - intel_cqm_rotation_rmid.
 *
 * Rotation works by taking away an RMID from a group (the old RMID),
 * and assigning the free RMID to another group (the new RMID). We must
 * then wait for the old RMID to not be used (no cachelines tagged).
 * This ensures that all cachelines are tagged with 'active' RMIDs. At
 * this point we can start reading values for the new RMID and treat the
 * old RMID as the free RMID for the next rotation.
 *
 * Return %true or %false depending on whether we did any rotating.
 */
static bool __intel_cqm_rmid_rotate(void)
{
	struct perf_event *group, *start = NULL;
	unsigned int threshold_limit;
	unsigned int nr_needed = 0;
	unsigned int nr_available;
	bool rotated = false;

	mutex_lock(&cache_mutex);

again:
	/*
	 * Fast path through this function if there are no groups and no
	 * RMIDs that need cleaning.
	 */
	if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
		goto out;

	list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
		if (!__rmid_valid(group->hw.cqm_rmid)) {
			if (!start)
				start = group;
			nr_needed++;
		}
	}

	/*
	 * We have some event groups, but they all have RMIDs assigned
	 * and no RMIDs need cleaning.
	 */
	if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
		goto out;

	if (!nr_needed)
		goto stabilize;

	/*
	 * We have more event groups without RMIDs than available RMIDs,
	 * or we have event groups that conflict with the ones currently
	 * scheduled.
	 *
	 * We force deallocate the rmid of the group at the head of
	 * cache_groups. The first event group without an RMID then gets
	 * assigned intel_cqm_rotation_rmid. This ensures we always make
	 * forward progress.
	 *
	 * Rotate the cache_groups list so the previous head is now the
	 * tail.
	 */
	__intel_cqm_pick_and_rotate(start);

	/*
	 * If the rotation is going to succeed, reduce the threshold so
	 * that we don't needlessly reuse dirty RMIDs.
	 */
	if (__rmid_valid(intel_cqm_rotation_rmid)) {
		intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
		intel_cqm_rotation_rmid = __get_rmid();

		intel_cqm_sched_out_conflicting_events(start);

		if (__intel_cqm_threshold)
			__intel_cqm_threshold--;
	}

	rotated = true;

stabilize:
	/*
	 * We now need to stabilize the RMID we freed above (if any) to
	 * ensure that the next time we rotate we have an RMID with zero
	 * occupancy value.
	 *
	 * Alternatively, if we didn't need to perform any rotation,
	 * we'll have a bunch of RMIDs in limbo that need stabilizing.
	 */
	threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;

	while (intel_cqm_rmid_stabilize(&nr_available) &&
	       __intel_cqm_threshold < threshold_limit) {
		unsigned int steal_limit;

		/*
		 * Don't spin if nobody is actively waiting for an RMID,
		 * the rotation worker will be kicked as soon as an
		 * event needs an RMID anyway.
		 */
		if (!nr_needed)
			break;

		/* Allow max 25% of RMIDs to be in limbo. */
		steal_limit = (cqm_max_rmid + 1) / 4;

		/*
		 * We failed to stabilize any RMIDs so our rotation
		 * logic is now stuck. In order to make forward progress
		 * we have a few options:
		 *
		 * 1. rotate ("steal") another RMID
		 * 2. increase the threshold
		 * 3. do nothing
		 *
		 * We do both of 1. and 2. until we hit the steal limit.
		 *
		 * The steal limit prevents all RMIDs ending up on the
		 * limbo list. This can happen if every RMID has a
		 * non-zero occupancy above threshold_limit, and the
		 * occupancy values aren't dropping fast enough.
		 *
		 * Note that there is prioritisation at work here - we'd
		 * rather increase the number of RMIDs on the limbo list
		 * than increase the threshold, because increasing the
		 * threshold skews the event data (because we reuse
		 * dirty RMIDs) - threshold bumps are a last resort.
		 */
		if (nr_available < steal_limit)
			goto again;

		__intel_cqm_threshold++;
	}

out:
	mutex_unlock(&cache_mutex);
	return rotated;
}

static void intel_cqm_rmid_rotate(struct work_struct *work);

static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);

static struct pmu intel_cqm_pmu;

static void intel_cqm_rmid_rotate(struct work_struct *work)
{
	unsigned long delay;

	__intel_cqm_rmid_rotate();

	delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
	schedule_delayed_work(&intel_cqm_rmid_work, delay);
}

/*
 * Find a group and setup RMID.
 *
 * If we're part of a group, we use the group's RMID.
 */
static void intel_cqm_setup_event(struct perf_event *event,
				  struct perf_event **group)
{
	struct perf_event *iter;
	bool conflict = false;
	u32 rmid;

	event->hw.is_group_event = false;
	list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
		rmid = iter->hw.cqm_rmid;

		if (__match_event(iter, event)) {
			/* All tasks in a group share an RMID */
			event->hw.cqm_rmid = rmid;
			*group = iter;
			return;
		}

		/*
		 * We only care about conflicts for events that are
		 * actually scheduled in (and hence have a valid RMID).
		 */
		if (__conflict_event(iter, event) && __rmid_valid(rmid))
			conflict = true;
	}

	if (conflict)
		rmid = INVALID_RMID;
	else
		rmid = __get_rmid();

	event->hw.cqm_rmid = rmid;
}

static void intel_cqm_event_read(struct perf_event *event)
{
	unsigned long flags;
	u32 rmid;
	u64 val;

	/*
	 * Task events are handled by intel_cqm_event_count().
	 */
	if (event->cpu == -1)
		return;

	raw_spin_lock_irqsave(&cache_lock, flags);
	rmid = event->hw.cqm_rmid;

	if (!__rmid_valid(rmid))
		goto out;

	val = __rmid_read(rmid);

	/*
	 * Ignore this reading on error states and do not update the value.
	 */
	if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
		goto out;

	local64_set(&event->count, val);
out:
	raw_spin_unlock_irqrestore(&cache_lock, flags);
}

static void __intel_cqm_event_count(void *info)
{
	struct rmid_read *rr = info;
	u64 val;

	val = __rmid_read(rr->rmid);

	if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
		return;

	atomic64_add(val, &rr->value);
}

static inline bool cqm_group_leader(struct perf_event *event)
{
	return !list_empty(&event->hw.cqm_groups_entry);
}

static u64 intel_cqm_event_count(struct perf_event *event)
{
	unsigned long flags;
	struct rmid_read rr = {
		.value = ATOMIC64_INIT(0),
	};

	/*
	 * We only need to worry about task events. System-wide events
	 * are handled like usual, i.e. entirely with
	 * intel_cqm_event_read().
	 */
	if (event->cpu != -1)
		return __perf_event_count(event);

	/*
	 * Only the group leader gets to report values, unless there are
	 * multiple events in the same group, in which case we still
	 * need to read the other events. This stops us reporting
	 * duplicate values to userspace, and gives us a clear rule for
	 * which task gets to report the values.
	 *
	 * Note that it is impossible to attribute these values to
	 * specific packages - we forfeit that ability when we create
	 * task events.
	 */
	if (!cqm_group_leader(event) && !event->hw.is_group_event)
		return 0;

	/*
	 * Getting up-to-date values requires an SMP IPI which is not
	 * possible if we're being called in interrupt context. Return
	 * the cached values instead.
	 */
	if (unlikely(in_interrupt()))
		goto out;

	/*
	 * Notice that we don't perform the reading of an RMID
	 * atomically, because we can't hold a spin lock across the
	 * IPIs.
	 *
	 * Speculatively perform the read, since @event might be
	 * assigned a different (possibly invalid) RMID while we're
	 * busy performing the IPI calls. It's therefore necessary to
	 * check @event's RMID afterwards, and if it has changed,
	 * discard the result of the read.
	 */
	rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);

	if (!__rmid_valid(rr.rmid))
		goto out;

	on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);

	raw_spin_lock_irqsave(&cache_lock, flags);
	if (event->hw.cqm_rmid == rr.rmid)
		local64_set(&event->count, atomic64_read(&rr.value));
	raw_spin_unlock_irqrestore(&cache_lock, flags);
out:
	return __perf_event_count(event);
}

static void intel_cqm_event_start(struct perf_event *event, int mode)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 rmid = event->hw.cqm_rmid;

	if (!(event->hw.cqm_state & PERF_HES_STOPPED))
		return;

	event->hw.cqm_state &= ~PERF_HES_STOPPED;

	if (state->rmid_usecnt++) {
		if (!WARN_ON_ONCE(state->rmid != rmid))
			return;
	} else {
		WARN_ON_ONCE(state->rmid);
	}

	state->rmid = rmid;
	wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
}

static void intel_cqm_event_stop(struct perf_event *event, int mode)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	if (event->hw.cqm_state & PERF_HES_STOPPED)
		return;

	event->hw.cqm_state |= PERF_HES_STOPPED;

	intel_cqm_event_read(event);

	if (!--state->rmid_usecnt) {
		state->rmid = 0;
		wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
	} else {
		WARN_ON_ONCE(!state->rmid);
	}
}

static int intel_cqm_event_add(struct perf_event *event, int mode)
{
	unsigned long flags;
	u32 rmid;

	raw_spin_lock_irqsave(&cache_lock, flags);

	event->hw.cqm_state = PERF_HES_STOPPED;
	rmid = event->hw.cqm_rmid;

	if (__rmid_valid(rmid) && (mode & PERF_EF_START))
		intel_cqm_event_start(event, mode);

	raw_spin_unlock_irqrestore(&cache_lock, flags);

	return 0;
}

static void intel_cqm_event_destroy(struct perf_event *event)
{
	struct perf_event *group_other = NULL;

	mutex_lock(&cache_mutex);

	/*
	 * If there's another event in this group...
	 */
	if (!list_empty(&event->hw.cqm_group_entry)) {
		group_other = list_first_entry(&event->hw.cqm_group_entry,
					       struct perf_event,
					       hw.cqm_group_entry);
		list_del(&event->hw.cqm_group_entry);
	}

	/*
	 * And we're the group leader..
	 */
	if (cqm_group_leader(event)) {
		/*
		 * If there was a group_other, make that leader, otherwise
		 * destroy the group and return the RMID.
		 */
		if (group_other) {
			list_replace(&event->hw.cqm_groups_entry,
				     &group_other->hw.cqm_groups_entry);
		} else {
			u32 rmid = event->hw.cqm_rmid;

			if (__rmid_valid(rmid))
				__put_rmid(rmid);
			list_del(&event->hw.cqm_groups_entry);
		}
	}

	mutex_unlock(&cache_mutex);
}

static int intel_cqm_event_init(struct perf_event *event)
{
	struct perf_event *group = NULL;
	bool rotate = false;

	if (event->attr.type != intel_cqm_pmu.type)
		return -ENOENT;

	if (event->attr.config & ~QOS_EVENT_MASK)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	INIT_LIST_HEAD(&event->hw.cqm_group_entry);
	INIT_LIST_HEAD(&event->hw.cqm_groups_entry);

	event->destroy = intel_cqm_event_destroy;

	mutex_lock(&cache_mutex);

	/* Will also set rmid */
	intel_cqm_setup_event(event, &group);

	if (group) {
		list_add_tail(&event->hw.cqm_group_entry,
			      &group->hw.cqm_group_entry);
	} else {
		list_add_tail(&event->hw.cqm_groups_entry,
			      &cache_groups);

		/*
		 * All RMIDs are either in use or have recently been
		 * used. Kick the rotation worker to clean/free some.
		 *
		 * We only do this for the group leader, rather than for
		 * every event in a group to save on needless work.
		 */
		if (!__rmid_valid(event->hw.cqm_rmid))
			rotate = true;
	}

	mutex_unlock(&cache_mutex);

	if (rotate)
		schedule_delayed_work(&intel_cqm_rmid_work, 0);

	return 0;
}

EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");

static struct attribute *intel_cqm_events_attr[] = {
	EVENT_PTR(intel_cqm_llc),
	EVENT_PTR(intel_cqm_llc_pkg),
	EVENT_PTR(intel_cqm_llc_unit),
	EVENT_PTR(intel_cqm_llc_scale),
	EVENT_PTR(intel_cqm_llc_snapshot),
	NULL,
};

static struct attribute_group intel_cqm_events_group = {
	.name = "events",
	.attrs = intel_cqm_events_attr,
};
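
/*
 * Illustrative usage, an assumption rather than anything enforced
 * here: once perf_pmu_register() has run in intel_cqm_init(), the
 * attributes above surface under
 * /sys/bus/event_source/devices/intel_cqm/, so occupancy can be read
 * with e.g.:
 *
 *	perf stat -e intel_cqm/llc_occupancy/ -p <pid>
 *
 * The .unit and .scale attributes let the tool report bytes instead of
 * raw cacheline counts.
 */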

PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *intel_cqm_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group intel_cqm_format_group = {
	.name = "format",
	.attrs = intel_cqm_formats_attr,
};

static ssize_t
max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	ssize_t rv;

	mutex_lock(&cache_mutex);
	rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold);
	mutex_unlock(&cache_mutex);

	return rv;
}

static ssize_t
max_recycle_threshold_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int bytes, cachelines;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	mutex_lock(&cache_mutex);

	__intel_cqm_max_threshold = bytes;
	cachelines = bytes / cqm_l3_scale;

	/*
	 * The new maximum takes effect immediately.
	 */
	if (__intel_cqm_threshold > cachelines)
		__intel_cqm_threshold = cachelines;

	mutex_unlock(&cache_mutex);

	return count;
}

static DEVICE_ATTR_RW(max_recycle_threshold);
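
/*
 * Illustrative usage, an assumption about the resulting sysfs layout:
 * the read-write attribute above appears on the PMU device, so the
 * recycling threshold can be capped from userland in bytes, e.g.:
 *
 *	echo 8192 > /sys/bus/event_source/devices/intel_cqm/max_recycle_threshold
 *
 * The store path converts bytes to cachelines via cqm_l3_scale and
 * clamps __intel_cqm_threshold immediately.
 */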

static struct attribute *intel_cqm_attrs[] = {
	&dev_attr_max_recycle_threshold.attr,
	NULL,
};

static const struct attribute_group intel_cqm_group = {
	.attrs = intel_cqm_attrs,
};

static const struct attribute_group *intel_cqm_attr_groups[] = {
	&intel_cqm_events_group,
	&intel_cqm_format_group,
	&intel_cqm_group,
	NULL,
};

static struct pmu intel_cqm_pmu = {
	.hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
	.attr_groups	     = intel_cqm_attr_groups,
	.task_ctx_nr	     = perf_sw_context,
	.event_init	     = intel_cqm_event_init,
	.add		     = intel_cqm_event_add,
	.del		     = intel_cqm_event_stop,
	.start		     = intel_cqm_event_start,
	.stop		     = intel_cqm_event_stop,
	.read		     = intel_cqm_event_read,
	.count		     = intel_cqm_event_count,
};

static inline void cqm_pick_event_reader(int cpu)
{
	int phys_id = topology_physical_package_id(cpu);
	int i;

	for_each_cpu(i, &cqm_cpumask) {
		if (phys_id == topology_physical_package_id(i))
			return;	/* already got reader for this socket */
	}

	cpumask_set_cpu(cpu, &cqm_cpumask);
}

static void intel_cqm_cpu_starting(unsigned int cpu)
{
	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	state->rmid = 0;
	state->closid = 0;
	state->rmid_usecnt = 0;

	WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
	WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
}

static void intel_cqm_cpu_exit(unsigned int cpu)
{
	int phys_id = topology_physical_package_id(cpu);
	int i;

	/*
	 * Is @cpu a designated cqm reader?
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
		return;

	for_each_online_cpu(i) {
		if (i == cpu)
			continue;

		if (phys_id == topology_physical_package_id(i)) {
			cpumask_set_cpu(i, &cqm_cpumask);
			break;
		}
	}
}

static int intel_cqm_cpu_notifier(struct notifier_block *nb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		intel_cqm_cpu_exit(cpu);
		break;
	case CPU_STARTING:
		intel_cqm_cpu_starting(cpu);
		cqm_pick_event_reader(cpu);
		break;
	}

	return NOTIFY_OK;
}

static const struct x86_cpu_id intel_cqm_match[] = {
	{ .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
	{}
};

static int __init intel_cqm_init(void)
{
	char *str = NULL, scale[20];
	int i, cpu, ret;

	if (!x86_match_cpu(intel_cqm_match))
		return -ENODEV;

	cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;

	/*
	 * It's possible that not all resources support the same number
	 * of RMIDs. Instead of making scheduling much more complicated
	 * (where we have to match a task's RMID to a cpu that supports
	 * that many RMIDs) just find the minimum RMIDs supported across
	 * all cpus.
	 *
	 * Also, check that the scales match on all cpus.
	 */
	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->x86_cache_max_rmid < cqm_max_rmid)
			cqm_max_rmid = c->x86_cache_max_rmid;

		if (c->x86_cache_occ_scale != cqm_l3_scale) {
			pr_err("Multiple LLC scale values, disabling\n");
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * A reasonable upper limit on the max threshold is the number
	 * of lines tagged per RMID if all RMIDs have the same number of
	 * lines tagged in the LLC.
	 *
	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
	__intel_cqm_max_threshold =
		boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);

	snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
	str = kstrdup(scale, GFP_KERNEL);
	if (!str) {
		ret = -ENOMEM;
		goto out;
	}

	event_attr_intel_cqm_llc_scale.event_str = str;

	ret = intel_cqm_setup_rmid_cache();
	if (ret)
		goto out;

	for_each_online_cpu(i) {
		intel_cqm_cpu_starting(i);
		cqm_pick_event_reader(i);
	}

	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
	if (ret) {
		pr_err("Intel CQM perf registration failed: %d\n", ret);
		goto out;
	}

	pr_info("Intel CQM monitoring enabled\n");

	/*
	 * Register the hot cpu notifier once we are sure cqm
	 * is enabled to avoid notifier leak.
	 */
	__perf_cpu_notifier(intel_cqm_cpu_notifier);
out:
	cpu_notifier_register_done();
	if (ret) {
		kfree(str);
		cqm_cleanup();
	}

	return ret;
}
device_initcall(intel_cqm_init);