// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual.
 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/cacheinfo.h>
16 #include <linux/cpu.h>
17 #include <linux/debugfs.h>
18 #include <linux/fs.h>
19 #include <linux/fs_parser.h>
20 #include <linux/sysfs.h>
21 #include <linux/kernfs.h>
22 #include <linux/seq_buf.h>
23 #include <linux/seq_file.h>
24 #include <linux/sched/signal.h>
25 #include <linux/sched/task.h>
26 #include <linux/slab.h>
27 #include <linux/task_work.h>
28 #include <linux/user_namespace.h>
29
30 #include <uapi/linux/magic.h>
31
32 #include <asm/resctrl.h>
33 #include "internal.h"
34
35 DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
36 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
37 DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
38 static struct kernfs_root *rdt_root;
39 struct rdtgroup rdtgroup_default;
40 LIST_HEAD(rdt_all_groups);
41
42 /* Kernel fs node for "info" directory under root */
43 static struct kernfs_node *kn_info;
44
45 /* Kernel fs node for "mon_groups" directory under root */
46 static struct kernfs_node *kn_mongrp;
47
48 /* Kernel fs node for "mon_data" directory under root */
49 static struct kernfs_node *kn_mondata;
50
51 static struct seq_buf last_cmd_status;
52 static char last_cmd_status_buf[512];
53
54 struct dentry *debugfs_resctrl;
55
void rdt_last_cmd_clear(void)
57 {
58 lockdep_assert_held(&rdtgroup_mutex);
59 seq_buf_clear(&last_cmd_status);
60 }
61
void rdt_last_cmd_puts(const char *s)
63 {
64 lockdep_assert_held(&rdtgroup_mutex);
65 seq_buf_puts(&last_cmd_status, s);
66 }
67
void rdt_last_cmd_printf(const char *fmt, ...)
69 {
70 va_list ap;
71
72 va_start(ap, fmt);
73 lockdep_assert_held(&rdtgroup_mutex);
74 seq_buf_vprintf(&last_cmd_status, fmt, ap);
75 va_end(ap);
76 }
77
78 /*
79 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
80 * we can keep a bitmap of free CLOSIDs in a single integer.
81 *
82 * Using a global CLOSID across all resources has some advantages and
83 * some drawbacks:
84 * + We can simply set "current->closid" to assign a task to a resource
85 * group.
86 * + Context switch code can avoid extra memory references deciding which
87 * CLOSID to load into the PQR_ASSOC MSR
88 * - We give up some options in configuring resource groups across multi-socket
89 * systems.
90 * - Our choices on how to configure each resource become progressively more
91 * limited as the number of resources grows.
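 *
 * For example, with 4 CLOSIDs closid_init() leaves closid_free_map as
 * 0b1110: CLOSID 0 is reserved for the default resource group.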
92 */
93 static int closid_free_map;
94 static int closid_free_map_len;
95
int closids_supported(void)
97 {
98 return closid_free_map_len;
99 }
100
static void closid_init(void)
102 {
103 struct rdt_resource *r;
104 int rdt_min_closid = 32;
105
106 /* Compute rdt_min_closid across all resources */
107 for_each_alloc_enabled_rdt_resource(r)
108 rdt_min_closid = min(rdt_min_closid, r->num_closid);
109
110 closid_free_map = BIT_MASK(rdt_min_closid) - 1;
111
112 /* CLOSID 0 is always reserved for the default group */
113 closid_free_map &= ~1;
114 closid_free_map_len = rdt_min_closid;
115 }
116
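/*
 * Allocate the lowest-numbered free CLOSID, or return -ENOSPC if the free
 * map is empty.
 */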
static int closid_alloc(void)
118 {
119 u32 closid = ffs(closid_free_map);
120
121 if (closid == 0)
122 return -ENOSPC;
123 closid--;
124 closid_free_map &= ~(1 << closid);
125
126 return closid;
127 }
128
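/* Return @closid to the free map */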
void closid_free(int closid)
130 {
131 closid_free_map |= 1 << closid;
132 }
133
134 /**
135 * closid_allocated - test if provided closid is in use
136 * @closid: closid to be tested
137 *
138 * Return: true if @closid is currently associated with a resource group,
139 * false if @closid is free
140 */
static bool closid_allocated(unsigned int closid)
142 {
143 return (closid_free_map & (1 << closid)) == 0;
144 }
145
146 /**
147 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
149 *
150 * Each resource group is associated with a @closid. Here the mode
151 * of a resource group can be queried by searching for it using its closid.
152 *
153 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
154 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
156 {
157 struct rdtgroup *rdtgrp;
158
159 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
160 if (rdtgrp->closid == closid)
161 return rdtgrp->mode;
162 }
163
164 return RDT_NUM_MODES;
165 }
166
167 static const char * const rdt_mode_str[] = {
168 [RDT_MODE_SHAREABLE] = "shareable",
169 [RDT_MODE_EXCLUSIVE] = "exclusive",
170 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
171 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
172 };
173
174 /**
175 * rdtgroup_mode_str - Return the string representation of mode
176 * @mode: the resource group mode as &enum rdtgroup_mode
177 *
178 * Return: string representation of valid mode, "unknown" otherwise
179 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
181 {
182 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
183 return "unknown";
184
185 return rdt_mode_str[mode];
186 }
187
188 /* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
190 {
191 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
192 .ia_uid = current_fsuid(),
193 .ia_gid = current_fsgid(), };
194
195 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
196 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
197 return 0;
198
199 return kernfs_setattr(kn, &iattr);
200 }
201
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
203 {
204 struct kernfs_node *kn;
205 int ret;
206
207 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
208 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
209 0, rft->kf_ops, rft, NULL, NULL);
210 if (IS_ERR(kn))
211 return PTR_ERR(kn);
212
213 ret = rdtgroup_kn_set_ugid(kn);
214 if (ret) {
215 kernfs_remove(kn);
216 return ret;
217 }
218
219 return 0;
220 }
221
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
223 {
224 struct kernfs_open_file *of = m->private;
225 struct rftype *rft = of->kn->priv;
226
227 if (rft->seq_show)
228 return rft->seq_show(of, m, arg);
229 return 0;
230 }
231
static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
233 size_t nbytes, loff_t off)
234 {
235 struct rftype *rft = of->kn->priv;
236
237 if (rft->write)
238 return rft->write(of, buf, nbytes, off);
239
240 return -EINVAL;
241 }
242
243 static struct kernfs_ops rdtgroup_kf_single_ops = {
244 .atomic_write_len = PAGE_SIZE,
245 .write = rdtgroup_file_write,
246 .seq_show = rdtgroup_seqfile_show,
247 };
248
249 static struct kernfs_ops kf_mondata_ops = {
250 .atomic_write_len = PAGE_SIZE,
251 .seq_show = rdtgroup_mondata_show,
252 };
253
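/* "cpus_list" uses the human-readable CPU list format, "cpus" a bitmask */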
static bool is_cpu_list(struct kernfs_open_file *of)
255 {
256 struct rftype *rft = of->kn->priv;
257
258 return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
259 }
260
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
262 struct seq_file *s, void *v)
263 {
264 struct rdtgroup *rdtgrp;
265 struct cpumask *mask;
266 int ret = 0;
267
268 rdtgrp = rdtgroup_kn_lock_live(of->kn);
269
270 if (rdtgrp) {
271 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
272 if (!rdtgrp->plr->d) {
273 rdt_last_cmd_clear();
274 rdt_last_cmd_puts("Cache domain offline\n");
275 ret = -ENODEV;
276 } else {
277 mask = &rdtgrp->plr->d->cpu_mask;
278 seq_printf(s, is_cpu_list(of) ?
279 "%*pbl\n" : "%*pb\n",
280 cpumask_pr_args(mask));
281 }
282 } else {
283 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
284 cpumask_pr_args(&rdtgrp->cpu_mask));
285 }
286 } else {
287 ret = -ENOENT;
288 }
289 rdtgroup_kn_unlock(of->kn);
290
291 return ret;
292 }
293
294 /*
295 * This is safe against resctrl_sched_in() called from __switch_to()
296 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
298 * preemption is disabled.
299 */
static void update_cpu_closid_rmid(void *info)
301 {
302 struct rdtgroup *r = info;
303
304 if (r) {
305 this_cpu_write(pqr_state.default_closid, r->closid);
306 this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
307 }
308
309 /*
310 * We cannot unconditionally write the MSR because the current
311 * executing task might have its own closid selected. Just reuse
312 * the context switch code.
313 */
314 resctrl_sched_in(current);
315 }
316
317 /*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
319 *
320 * Per task closids/rmids must have been set up before calling this function.
321 */
322 static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
324 {
325 int cpu = get_cpu();
326
327 if (cpumask_test_cpu(cpu, cpu_mask))
328 update_cpu_closid_rmid(r);
329 smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
330 put_cpu();
331 }
332
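/*
 * Update the CPU mask of a monitor group. CPUs may only be taken from the
 * parent control group; CPUs dropped here are handed back to the parent.
 */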
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
334 cpumask_var_t tmpmask)
335 {
336 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
337 struct list_head *head;
338
339 /* Check whether cpus belong to parent ctrl group */
340 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
341 if (cpumask_weight(tmpmask)) {
342 rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
343 return -EINVAL;
344 }
345
346 /* Check whether cpus are dropped from this group */
347 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
348 if (cpumask_weight(tmpmask)) {
349 /* Give any dropped cpus to parent rdtgroup */
350 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
351 update_closid_rmid(tmpmask, prgrp);
352 }
353
354 /*
355 * If we added cpus, remove them from previous group that owned them
356 * and update per-cpu rmid
357 */
358 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
359 if (cpumask_weight(tmpmask)) {
360 head = &prgrp->mon.crdtgrp_list;
361 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
362 if (crgrp == rdtgrp)
363 continue;
364 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
365 tmpmask);
366 }
367 update_closid_rmid(tmpmask, rdtgrp);
368 }
369
370 /* Done pushing/pulling - update this group with new mask */
371 cpumask_copy(&rdtgrp->cpu_mask, newmask);
372
373 return 0;
374 }
375
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
377 {
378 struct rdtgroup *crgrp;
379
380 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
382 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
383 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
384 }
385
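/*
 * Update the CPU mask of a control group. Dropped CPUs are given back to
 * the default group; newly added CPUs are pulled from whichever group
 * (and its monitor groups) currently owns them.
 */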
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
387 cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
388 {
389 struct rdtgroup *r, *crgrp;
390 struct list_head *head;
391
392 /* Check whether cpus are dropped from this group */
393 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
394 if (cpumask_weight(tmpmask)) {
395 /* Can't drop from default group */
396 if (rdtgrp == &rdtgroup_default) {
397 rdt_last_cmd_puts("Can't drop CPUs from default group\n");
398 return -EINVAL;
399 }
400
401 /* Give any dropped cpus to rdtgroup_default */
402 cpumask_or(&rdtgroup_default.cpu_mask,
403 &rdtgroup_default.cpu_mask, tmpmask);
404 update_closid_rmid(tmpmask, &rdtgroup_default);
405 }
406
407 /*
408 * If we added cpus, remove them from previous group and
409 * the prev group's child groups that owned them
410 * and update per-cpu closid/rmid.
411 */
412 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
413 if (cpumask_weight(tmpmask)) {
414 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
415 if (r == rdtgrp)
416 continue;
417 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
418 if (cpumask_weight(tmpmask1))
419 cpumask_rdtgrp_clear(r, tmpmask1);
420 }
421 update_closid_rmid(tmpmask, rdtgrp);
422 }
423
424 /* Done pushing/pulling - update this group with new mask */
425 cpumask_copy(&rdtgrp->cpu_mask, newmask);
426
427 /*
428 * Clear child mon group masks since there is a new parent mask
429 * now and update the rmid for the cpus the child lost.
430 */
431 head = &rdtgrp->mon.crdtgrp_list;
432 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
433 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
434 update_closid_rmid(tmpmask, rdtgrp);
435 cpumask_clear(&crgrp->cpu_mask);
436 }
437
438 return 0;
439 }
440
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
442 char *buf, size_t nbytes, loff_t off)
443 {
444 cpumask_var_t tmpmask, newmask, tmpmask1;
445 struct rdtgroup *rdtgrp;
446 int ret;
447
448 if (!buf)
449 return -EINVAL;
450
451 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
452 return -ENOMEM;
453 if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
454 free_cpumask_var(tmpmask);
455 return -ENOMEM;
456 }
457 if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
458 free_cpumask_var(tmpmask);
459 free_cpumask_var(newmask);
460 return -ENOMEM;
461 }
462
463 rdtgrp = rdtgroup_kn_lock_live(of->kn);
464 if (!rdtgrp) {
465 ret = -ENOENT;
466 goto unlock;
467 }
468
469 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
470 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
471 ret = -EINVAL;
472 rdt_last_cmd_puts("Pseudo-locking in progress\n");
473 goto unlock;
474 }
475
476 if (is_cpu_list(of))
477 ret = cpulist_parse(buf, newmask);
478 else
479 ret = cpumask_parse(buf, newmask);
480
481 if (ret) {
482 rdt_last_cmd_puts("Bad CPU list/mask\n");
483 goto unlock;
484 }
485
486 /* check that user didn't specify any offline cpus */
487 cpumask_andnot(tmpmask, newmask, cpu_online_mask);
488 if (cpumask_weight(tmpmask)) {
489 ret = -EINVAL;
490 rdt_last_cmd_puts("Can only assign online CPUs\n");
491 goto unlock;
492 }
493
494 if (rdtgrp->type == RDTCTRL_GROUP)
495 ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
496 else if (rdtgrp->type == RDTMON_GROUP)
497 ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
498 else
499 ret = -EINVAL;
500
501 unlock:
502 rdtgroup_kn_unlock(of->kn);
503 free_cpumask_var(tmpmask);
504 free_cpumask_var(newmask);
505 free_cpumask_var(tmpmask1);
506
507 return ret ?: nbytes;
508 }
509
510 /**
511 * rdtgroup_remove - the helper to remove resource group safely
512 * @rdtgrp: resource group to remove
513 *
514 * On resource group creation via a mkdir, an extra kernfs_node reference is
515 * taken to ensure that the rdtgroup structure remains accessible for the
516 * rdtgroup_kn_unlock() calls where it is removed.
517 *
518 * Drop the extra reference here, then free the rdtgroup structure.
519 *
520 * Return: void
521 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
523 {
524 kernfs_put(rdtgrp->kn);
525 kfree(rdtgrp);
526 }
527
static void _update_task_closid_rmid(void *task)
529 {
530 /*
531 * If the task is still current on this CPU, update PQR_ASSOC MSR.
532 * Otherwise, the MSR is updated when the task is scheduled in.
533 */
534 if (task == current)
535 resctrl_sched_in(task);
536 }
537
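/*
 * If @t is currently running, interrupt its CPU so the PQR_ASSOC MSR is
 * refreshed right away; otherwise the next context switch picks up the
 * new closid/rmid.
 */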
static void update_task_closid_rmid(struct task_struct *t)
539 {
540 if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
541 smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
542 else
543 _update_task_closid_rmid(t);
544 }
545
static int __rdtgroup_move_task(struct task_struct *tsk,
547 struct rdtgroup *rdtgrp)
548 {
549 /* If the task is already in rdtgrp, no need to move the task. */
550 if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
551 tsk->rmid == rdtgrp->mon.rmid) ||
552 (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
553 tsk->closid == rdtgrp->mon.parent->closid))
554 return 0;
555
556 /*
557 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
558 * updated by them.
559 *
560 * For ctrl_mon groups, move both closid and rmid.
561 * For monitor groups, can move the tasks only from
562 * their parent CTRL group.
563 */
564
565 if (rdtgrp->type == RDTCTRL_GROUP) {
566 WRITE_ONCE(tsk->closid, rdtgrp->closid);
567 WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
568 } else if (rdtgrp->type == RDTMON_GROUP) {
569 if (rdtgrp->mon.parent->closid == tsk->closid) {
570 WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
571 } else {
572 rdt_last_cmd_puts("Can't move task to different control group\n");
573 return -EINVAL;
574 }
575 }
576
577 /*
578 * Ensure the task's closid and rmid are written before determining if
	 * the task is current, which decides whether it will be interrupted.
580 * This pairs with the full barrier between the rq->curr update and
581 * resctrl_sched_in() during context switch.
582 */
583 smp_mb();
584
585 /*
586 * By now, the task's closid and rmid are set. If the task is current
587 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
588 * group go into effect. If the task is not current, the MSR will be
589 * updated when the task is scheduled in.
590 */
591 update_task_closid_rmid(tsk);
592
593 return 0;
594 }
595
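/* Task is in control group @r if allocation is supported and closids match */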
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
597 {
598 return (rdt_alloc_capable &&
599 (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
600 }
601
static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
603 {
604 return (rdt_mon_capable &&
605 (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
606 }
607
608 /**
609 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
610 * @r: Resource group
611 *
612 * Return: 1 if tasks have been assigned to @r, 0 otherwise
613 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
615 {
616 struct task_struct *p, *t;
617 int ret = 0;
618
619 lockdep_assert_held(&rdtgroup_mutex);
620
621 rcu_read_lock();
622 for_each_process_thread(p, t) {
623 if (is_closid_match(t, r) || is_rmid_match(t, r)) {
624 ret = 1;
625 break;
626 }
627 }
628 rcu_read_unlock();
629
630 return ret;
631 }
632
static int rdtgroup_task_write_permission(struct task_struct *task,
634 struct kernfs_open_file *of)
635 {
636 const struct cred *tcred = get_task_cred(task);
637 const struct cred *cred = current_cred();
638 int ret = 0;
639
640 /*
641 * Even if we're attaching all tasks in the thread group, we only
642 * need to check permissions on one of them.
643 */
644 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
645 !uid_eq(cred->euid, tcred->uid) &&
646 !uid_eq(cred->euid, tcred->suid)) {
647 rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
648 ret = -EPERM;
649 }
650
651 put_cred(tcred);
652 return ret;
653 }
654
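/*
 * Look up the task for @pid (0 means current), verify that the caller is
 * allowed to move it, then reassign its closid/rmid.
 */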
static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
656 struct kernfs_open_file *of)
657 {
658 struct task_struct *tsk;
659 int ret;
660
661 rcu_read_lock();
662 if (pid) {
663 tsk = find_task_by_vpid(pid);
664 if (!tsk) {
665 rcu_read_unlock();
666 rdt_last_cmd_printf("No task %d\n", pid);
667 return -ESRCH;
668 }
669 } else {
670 tsk = current;
671 }
672
673 get_task_struct(tsk);
674 rcu_read_unlock();
675
676 ret = rdtgroup_task_write_permission(tsk, of);
677 if (!ret)
678 ret = __rdtgroup_move_task(tsk, rdtgrp);
679
680 put_task_struct(tsk);
681 return ret;
682 }
683
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
685 char *buf, size_t nbytes, loff_t off)
686 {
687 struct rdtgroup *rdtgrp;
688 int ret = 0;
689 pid_t pid;
690
691 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
692 return -EINVAL;
693 rdtgrp = rdtgroup_kn_lock_live(of->kn);
694 if (!rdtgrp) {
695 rdtgroup_kn_unlock(of->kn);
696 return -ENOENT;
697 }
698 rdt_last_cmd_clear();
699
700 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
701 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
702 ret = -EINVAL;
703 rdt_last_cmd_puts("Pseudo-locking in progress\n");
704 goto unlock;
705 }
706
707 ret = rdtgroup_move_task(pid, rdtgrp, of);
708
709 unlock:
710 rdtgroup_kn_unlock(of->kn);
711
712 return ret ?: nbytes;
713 }
714
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
716 {
717 struct task_struct *p, *t;
718
719 rcu_read_lock();
720 for_each_process_thread(p, t) {
721 if (is_closid_match(t, r) || is_rmid_match(t, r))
722 seq_printf(s, "%d\n", t->pid);
723 }
724 rcu_read_unlock();
725 }
726
static int rdtgroup_tasks_show(struct kernfs_open_file *of,
728 struct seq_file *s, void *v)
729 {
730 struct rdtgroup *rdtgrp;
731 int ret = 0;
732
733 rdtgrp = rdtgroup_kn_lock_live(of->kn);
734 if (rdtgrp)
735 show_rdt_tasks(rdtgrp, s);
736 else
737 ret = -ENOENT;
738 rdtgroup_kn_unlock(of->kn);
739
740 return ret;
741 }
742
743 #ifdef CONFIG_PROC_CPU_RESCTRL
744
745 /*
746 * A task can only be part of one resctrl control group and of one monitor
747 * group which is associated to that control group.
748 *
749 * 1) res:
750 * mon:
751 *
752 * resctrl is not available.
753 *
754 * 2) res:/
755 * mon:
756 *
757 * Task is part of the root resctrl control group, and it is not associated
758 * to any monitor group.
759 *
760 * 3) res:/
761 * mon:mon0
762 *
763 * Task is part of the root resctrl control group and monitor group mon0.
764 *
765 * 4) res:group0
766 * mon:
767 *
768 * Task is part of resctrl control group group0, and it is not associated
769 * to any monitor group.
770 *
771 * 5) res:group0
772 * mon:mon1
773 *
774 * Task is part of resctrl control group group0 and monitor group mon1.
775 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
777 struct pid *pid, struct task_struct *tsk)
778 {
779 struct rdtgroup *rdtg;
780 int ret = 0;
781
782 mutex_lock(&rdtgroup_mutex);
783
784 /* Return empty if resctrl has not been mounted. */
785 if (!static_branch_unlikely(&rdt_enable_key)) {
786 seq_puts(s, "res:\nmon:\n");
787 goto unlock;
788 }
789
790 list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
791 struct rdtgroup *crg;
792
793 /*
794 * Task information is only relevant for shareable
795 * and exclusive groups.
796 */
797 if (rdtg->mode != RDT_MODE_SHAREABLE &&
798 rdtg->mode != RDT_MODE_EXCLUSIVE)
799 continue;
800
801 if (rdtg->closid != tsk->closid)
802 continue;
803
804 seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
805 rdtg->kn->name);
806 seq_puts(s, "mon:");
807 list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
808 mon.crdtgrp_list) {
809 if (tsk->rmid != crg->mon.rmid)
810 continue;
811 seq_printf(s, "%s", crg->kn->name);
812 break;
813 }
814 seq_putc(s, '\n');
815 goto unlock;
816 }
817 /*
818 * The above search should succeed. Otherwise return
819 * with an error.
820 */
821 ret = -ENOENT;
822 unlock:
823 mutex_unlock(&rdtgroup_mutex);
824
825 return ret;
826 }
827 #endif
828
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
830 struct seq_file *seq, void *v)
831 {
832 int len;
833
834 mutex_lock(&rdtgroup_mutex);
835 len = seq_buf_used(&last_cmd_status);
836 if (len)
837 seq_printf(seq, "%.*s", len, last_cmd_status_buf);
838 else
839 seq_puts(seq, "ok\n");
840 mutex_unlock(&rdtgroup_mutex);
841 return 0;
842 }
843
static int rdt_num_closids_show(struct kernfs_open_file *of,
845 struct seq_file *seq, void *v)
846 {
847 struct rdt_resource *r = of->kn->parent->priv;
848
849 seq_printf(seq, "%d\n", r->num_closid);
850 return 0;
851 }
852
static int rdt_default_ctrl_show(struct kernfs_open_file *of,
854 struct seq_file *seq, void *v)
855 {
856 struct rdt_resource *r = of->kn->parent->priv;
857
858 seq_printf(seq, "%x\n", r->default_ctrl);
859 return 0;
860 }
861
static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
863 struct seq_file *seq, void *v)
864 {
865 struct rdt_resource *r = of->kn->parent->priv;
866
867 seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
868 return 0;
869 }
870
static int rdt_shareable_bits_show(struct kernfs_open_file *of,
872 struct seq_file *seq, void *v)
873 {
874 struct rdt_resource *r = of->kn->parent->priv;
875
876 seq_printf(seq, "%x\n", r->cache.shareable_bits);
877 return 0;
878 }
879
880 /**
881 * rdt_bit_usage_show - Display current usage of resources
882 *
883 * A domain is a shared resource that can now be allocated differently. Here
884 * we display the current regions of the domain as an annotated bitmask.
885 * For each domain of this resource its allocation bitmask
886 * is annotated as below to indicate the current usage of the corresponding bit:
887 * 0 - currently unused
888 * X - currently available for sharing and used by software and hardware
889 * H - currently used by hardware only but available for software use
890 * S - currently used and shareable by software only
891 * E - currently used exclusively by one resource group
892 * P - currently pseudo-locked by one resource group
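 *
 * Example (hypothetical resource with one domain and an 8-bit CBM):
 *	0=XXSSSSE0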
893 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
895 struct seq_file *seq, void *v)
896 {
897 struct rdt_resource *r = of->kn->parent->priv;
898 /*
899 * Use unsigned long even though only 32 bits are used to ensure
900 * test_bit() is used safely.
901 */
902 unsigned long sw_shareable = 0, hw_shareable = 0;
903 unsigned long exclusive = 0, pseudo_locked = 0;
904 struct rdt_domain *dom;
905 int i, hwb, swb, excl, psl;
906 enum rdtgrp_mode mode;
907 bool sep = false;
908 u32 *ctrl;
909
910 mutex_lock(&rdtgroup_mutex);
911 hw_shareable = r->cache.shareable_bits;
912 list_for_each_entry(dom, &r->domains, list) {
913 if (sep)
914 seq_putc(seq, ';');
915 ctrl = dom->ctrl_val;
916 sw_shareable = 0;
917 exclusive = 0;
918 seq_printf(seq, "%d=", dom->id);
919 for (i = 0; i < closids_supported(); i++, ctrl++) {
920 if (!closid_allocated(i))
921 continue;
922 mode = rdtgroup_mode_by_closid(i);
923 switch (mode) {
924 case RDT_MODE_SHAREABLE:
925 sw_shareable |= *ctrl;
926 break;
927 case RDT_MODE_EXCLUSIVE:
928 exclusive |= *ctrl;
929 break;
930 case RDT_MODE_PSEUDO_LOCKSETUP:
931 /*
932 * RDT_MODE_PSEUDO_LOCKSETUP is possible
933 * here but not included since the CBM
934 * associated with this CLOSID in this mode
935 * is not initialized and no task or cpu can be
936 * assigned this CLOSID.
937 */
938 break;
939 case RDT_MODE_PSEUDO_LOCKED:
940 case RDT_NUM_MODES:
941 WARN(1,
942 "invalid mode for closid %d\n", i);
943 break;
944 }
945 }
946 for (i = r->cache.cbm_len - 1; i >= 0; i--) {
947 pseudo_locked = dom->plr ? dom->plr->cbm : 0;
948 hwb = test_bit(i, &hw_shareable);
949 swb = test_bit(i, &sw_shareable);
950 excl = test_bit(i, &exclusive);
951 psl = test_bit(i, &pseudo_locked);
952 if (hwb && swb)
953 seq_putc(seq, 'X');
954 else if (hwb && !swb)
955 seq_putc(seq, 'H');
956 else if (!hwb && swb)
957 seq_putc(seq, 'S');
958 else if (excl)
959 seq_putc(seq, 'E');
960 else if (psl)
961 seq_putc(seq, 'P');
962 else /* Unused bits remain */
963 seq_putc(seq, '0');
964 }
965 sep = true;
966 }
967 seq_putc(seq, '\n');
968 mutex_unlock(&rdtgroup_mutex);
969 return 0;
970 }
971
static int rdt_min_bw_show(struct kernfs_open_file *of,
973 struct seq_file *seq, void *v)
974 {
975 struct rdt_resource *r = of->kn->parent->priv;
976
977 seq_printf(seq, "%u\n", r->membw.min_bw);
978 return 0;
979 }
980
static int rdt_num_rmids_show(struct kernfs_open_file *of,
982 struct seq_file *seq, void *v)
983 {
984 struct rdt_resource *r = of->kn->parent->priv;
985
986 seq_printf(seq, "%d\n", r->num_rmid);
987
988 return 0;
989 }
990
static int rdt_mon_features_show(struct kernfs_open_file *of,
992 struct seq_file *seq, void *v)
993 {
994 struct rdt_resource *r = of->kn->parent->priv;
995 struct mon_evt *mevt;
996
997 list_for_each_entry(mevt, &r->evt_list, list)
998 seq_printf(seq, "%s\n", mevt->name);
999
1000 return 0;
1001 }
1002
static int rdt_bw_gran_show(struct kernfs_open_file *of,
1004 struct seq_file *seq, void *v)
1005 {
1006 struct rdt_resource *r = of->kn->parent->priv;
1007
1008 seq_printf(seq, "%u\n", r->membw.bw_gran);
1009 return 0;
1010 }
1011
static int rdt_delay_linear_show(struct kernfs_open_file *of,
1013 struct seq_file *seq, void *v)
1014 {
1015 struct rdt_resource *r = of->kn->parent->priv;
1016
1017 seq_printf(seq, "%u\n", r->membw.delay_linear);
1018 return 0;
1019 }
1020
static int max_threshold_occ_show(struct kernfs_open_file *of,
1022 struct seq_file *seq, void *v)
1023 {
1024 struct rdt_resource *r = of->kn->parent->priv;
1025
1026 seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
1027
1028 return 0;
1029 }
1030
static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
1032 struct seq_file *seq, void *v)
1033 {
1034 struct rdt_resource *r = of->kn->parent->priv;
1035
1036 if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
1037 seq_puts(seq, "per-thread\n");
1038 else
1039 seq_puts(seq, "max\n");
1040
1041 return 0;
1042 }
1043
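/*
 * Accept a new limbo threshold in bytes, bounded by the cache size, and
 * store it scaled down by the monitoring counter granularity.
 */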
static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
1045 char *buf, size_t nbytes, loff_t off)
1046 {
1047 struct rdt_resource *r = of->kn->parent->priv;
1048 unsigned int bytes;
1049 int ret;
1050
1051 ret = kstrtouint(buf, 0, &bytes);
1052 if (ret)
1053 return ret;
1054
1055 if (bytes > (boot_cpu_data.x86_cache_size * 1024))
1056 return -EINVAL;
1057
1058 resctrl_cqm_threshold = bytes / r->mon_scale;
1059
1060 return nbytes;
1061 }
1062
1063 /*
1064 * rdtgroup_mode_show - Display mode of this resource group
1065 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
1067 struct seq_file *s, void *v)
1068 {
1069 struct rdtgroup *rdtgrp;
1070
1071 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1072 if (!rdtgrp) {
1073 rdtgroup_kn_unlock(of->kn);
1074 return -ENOENT;
1075 }
1076
1077 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
1078
1079 rdtgroup_kn_unlock(of->kn);
1080 return 0;
1081 }
1082
1083 /**
1084 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
1085 * @r: RDT resource to which RDT domain @d belongs
1086 * @d: Cache instance for which a CDP peer is requested
1087 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
1088 * Used to return the result.
1089 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
1090 * Used to return the result.
1091 *
1092 * RDT resources are managed independently and by extension the RDT domains
1093 * (RDT resource instances) are managed independently also. The Code and
1094 * Data Prioritization (CDP) RDT resources, while managed independently,
1095 * could refer to the same underlying hardware. For example,
1096 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
1097 *
1098 * When provided with an RDT resource @r and an instance of that RDT
 * resource @d, rdt_cdp_peer_get() will return whether there is a peer RDT
1100 * resource and the exact instance that shares the same hardware.
1101 *
1102 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
1103 * If a CDP peer was found, @r_cdp will point to the peer RDT resource
1104 * and @d_cdp will point to the peer RDT domain.
1105 */
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
1107 struct rdt_resource **r_cdp,
1108 struct rdt_domain **d_cdp)
1109 {
1110 struct rdt_resource *_r_cdp = NULL;
1111 struct rdt_domain *_d_cdp = NULL;
1112 int ret = 0;
1113
1114 switch (r->rid) {
1115 case RDT_RESOURCE_L3DATA:
1116 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
1117 break;
1118 case RDT_RESOURCE_L3CODE:
1119 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
1120 break;
1121 case RDT_RESOURCE_L2DATA:
1122 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
1123 break;
1124 case RDT_RESOURCE_L2CODE:
1125 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
1126 break;
1127 default:
1128 ret = -ENOENT;
1129 goto out;
1130 }
1131
1132 /*
1133 * When a new CPU comes online and CDP is enabled then the new
1134 * RDT domains (if any) associated with both CDP RDT resources
1135 * are added in the same CPU online routine while the
1136 * rdtgroup_mutex is held. It should thus not happen for one
1137 * RDT domain to exist and be associated with its RDT CDP
1138 * resource but there is no RDT domain associated with the
1139 * peer RDT CDP resource. Hence the WARN.
1140 */
1141 _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
1142 if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
1143 _r_cdp = NULL;
1144 _d_cdp = NULL;
1145 ret = -EINVAL;
1146 }
1147
1148 out:
1149 *r_cdp = _r_cdp;
1150 *d_cdp = _d_cdp;
1151
1152 return ret;
1153 }
1154
1155 /**
1156 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
1157 * @r: Resource to which domain instance @d belongs.
1158 * @d: The domain instance for which @closid is being tested.
1159 * @cbm: Capacity bitmask being tested.
1160 * @closid: Intended closid for @cbm.
1161 * @exclusive: Only check if overlaps with exclusive resource groups
1162 *
1163 * Checks if provided @cbm intended to be used for @closid on domain
1164 * @d overlaps with any other closids or other hardware usage associated
1165 * with this domain. If @exclusive is true then only overlaps with
1166 * resource groups in exclusive mode will be considered. If @exclusive
1167 * is false then overlaps with any resource group or hardware entities
1168 * will be considered.
1169 *
1170 * @cbm is unsigned long, even if only 32 bits are used, to make the
1171 * bitmap functions work correctly.
1172 *
1173 * Return: false if CBM does not overlap, true if it does.
1174 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
1176 unsigned long cbm, int closid, bool exclusive)
1177 {
1178 enum rdtgrp_mode mode;
1179 unsigned long ctrl_b;
1180 u32 *ctrl;
1181 int i;
1182
1183 /* Check for any overlap with regions used by hardware directly */
1184 if (!exclusive) {
1185 ctrl_b = r->cache.shareable_bits;
1186 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
1187 return true;
1188 }
1189
1190 /* Check for overlap with other resource groups */
1191 ctrl = d->ctrl_val;
1192 for (i = 0; i < closids_supported(); i++, ctrl++) {
1193 ctrl_b = *ctrl;
1194 mode = rdtgroup_mode_by_closid(i);
1195 if (closid_allocated(i) && i != closid &&
1196 mode != RDT_MODE_PSEUDO_LOCKSETUP) {
1197 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
1198 if (exclusive) {
1199 if (mode == RDT_MODE_EXCLUSIVE)
1200 return true;
1201 continue;
1202 }
1203 return true;
1204 }
1205 }
1206 }
1207
1208 return false;
1209 }
1210
1211 /**
1212 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
1213 * @r: Resource to which domain instance @d belongs.
1214 * @d: The domain instance for which @closid is being tested.
1215 * @cbm: Capacity bitmask being tested.
1216 * @closid: Intended closid for @cbm.
1217 * @exclusive: Only check if overlaps with exclusive resource groups
1218 *
1219 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
1221 * for overlap. Overlap test is not limited to the specific resource for
1222 * which the CBM is intended though - when dealing with CDP resources that
1223 * share the underlying hardware the overlap check should be performed on
1224 * the CDP resource sharing the hardware also.
1225 *
1226 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
1227 * overlap test.
1228 *
1229 * Return: true if CBM overlap detected, false if there is no overlap
1230 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
1232 unsigned long cbm, int closid, bool exclusive)
1233 {
1234 struct rdt_resource *r_cdp;
1235 struct rdt_domain *d_cdp;
1236
1237 if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
1238 return true;
1239
1240 if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
1241 return false;
1242
1243 return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
1244 }
1245
1246 /**
1247 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
1248 *
1249 * An exclusive resource group implies that there should be no sharing of
1250 * its allocated resources. At the time this group is considered to be
1251 * exclusive this test can determine if its current schemata supports this
1252 * setting by testing for overlap with all other resource groups.
1253 *
1254 * Return: true if resource group can be exclusive, false if there is overlap
1255 * with allocations of other resource groups and thus this resource group
1256 * cannot be exclusive.
1257 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
1259 {
1260 int closid = rdtgrp->closid;
1261 struct rdt_resource *r;
1262 bool has_cache = false;
1263 struct rdt_domain *d;
1264
1265 for_each_alloc_enabled_rdt_resource(r) {
1266 if (r->rid == RDT_RESOURCE_MBA)
1267 continue;
1268 has_cache = true;
1269 list_for_each_entry(d, &r->domains, list) {
1270 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
1271 rdtgrp->closid, false)) {
1272 rdt_last_cmd_puts("Schemata overlaps\n");
1273 return false;
1274 }
1275 }
1276 }
1277
1278 if (!has_cache) {
1279 rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
1280 return false;
1281 }
1282
1283 return true;
1284 }
1285
1286 /**
1287 * rdtgroup_mode_write - Modify the resource group's mode
1288 *
1289 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
1291 char *buf, size_t nbytes, loff_t off)
1292 {
1293 struct rdtgroup *rdtgrp;
1294 enum rdtgrp_mode mode;
1295 int ret = 0;
1296
1297 /* Valid input requires a trailing newline */
1298 if (nbytes == 0 || buf[nbytes - 1] != '\n')
1299 return -EINVAL;
1300 buf[nbytes - 1] = '\0';
1301
1302 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1303 if (!rdtgrp) {
1304 rdtgroup_kn_unlock(of->kn);
1305 return -ENOENT;
1306 }
1307
1308 rdt_last_cmd_clear();
1309
1310 mode = rdtgrp->mode;
1311
1312 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
1313 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
1314 (!strcmp(buf, "pseudo-locksetup") &&
1315 mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
1316 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
1317 goto out;
1318
1319 if (mode == RDT_MODE_PSEUDO_LOCKED) {
1320 rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
1321 ret = -EINVAL;
1322 goto out;
1323 }
1324
1325 if (!strcmp(buf, "shareable")) {
1326 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1327 ret = rdtgroup_locksetup_exit(rdtgrp);
1328 if (ret)
1329 goto out;
1330 }
1331 rdtgrp->mode = RDT_MODE_SHAREABLE;
1332 } else if (!strcmp(buf, "exclusive")) {
1333 if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
1334 ret = -EINVAL;
1335 goto out;
1336 }
1337 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1338 ret = rdtgroup_locksetup_exit(rdtgrp);
1339 if (ret)
1340 goto out;
1341 }
1342 rdtgrp->mode = RDT_MODE_EXCLUSIVE;
1343 } else if (!strcmp(buf, "pseudo-locksetup")) {
1344 ret = rdtgroup_locksetup_enter(rdtgrp);
1345 if (ret)
1346 goto out;
1347 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
1348 } else {
1349 rdt_last_cmd_puts("Unknown or unsupported mode\n");
1350 ret = -EINVAL;
1351 }
1352
1353 out:
1354 rdtgroup_kn_unlock(of->kn);
1355 return ret ?: nbytes;
1356 }
1357
1358 /**
1359 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
1360 * @r: RDT resource to which @d belongs.
1361 * @d: RDT domain instance.
1362 * @cbm: bitmask for which the size should be computed.
1363 *
 * The bitmask provided, associated with the RDT domain instance @d, will be
1365 * translated into how many bytes it represents. The size in bytes is
1366 * computed by first dividing the total cache size by the CBM length to
1367 * determine how many bytes each bit in the bitmask represents. The result
1368 * is multiplied with the number of bits set in the bitmask.
1369 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
1371 * bitmap functions work correctly.
1372 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
1374 struct rdt_domain *d, unsigned long cbm)
1375 {
1376 struct cpu_cacheinfo *ci;
1377 unsigned int size = 0;
1378 int num_b, i;
1379
1380 num_b = bitmap_weight(&cbm, r->cache.cbm_len);
1381 ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
1382 for (i = 0; i < ci->num_leaves; i++) {
1383 if (ci->info_list[i].level == r->cache_level) {
1384 size = ci->info_list[i].size / r->cache.cbm_len * num_b;
1385 break;
1386 }
1387 }
1388
1389 return size;
1390 }
1391
1392 /**
1393 * rdtgroup_size_show - Display size in bytes of allocated regions
1394 *
1395 * The "size" file mirrors the layout of the "schemata" file, printing the
1396 * size in bytes of each region instead of the capacity bitmask.
1397 *
1398 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
1400 struct seq_file *s, void *v)
1401 {
1402 struct rdtgroup *rdtgrp;
1403 struct rdt_resource *r;
1404 struct rdt_domain *d;
1405 unsigned int size;
1406 int ret = 0;
1407 bool sep;
1408 u32 ctrl;
1409
1410 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1411 if (!rdtgrp) {
1412 rdtgroup_kn_unlock(of->kn);
1413 return -ENOENT;
1414 }
1415
1416 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1417 if (!rdtgrp->plr->d) {
1418 rdt_last_cmd_clear();
1419 rdt_last_cmd_puts("Cache domain offline\n");
1420 ret = -ENODEV;
1421 } else {
1422 seq_printf(s, "%*s:", max_name_width,
1423 rdtgrp->plr->r->name);
1424 size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
1425 rdtgrp->plr->d,
1426 rdtgrp->plr->cbm);
1427 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
1428 }
1429 goto out;
1430 }
1431
1432 for_each_alloc_enabled_rdt_resource(r) {
1433 sep = false;
1434 seq_printf(s, "%*s:", max_name_width, r->name);
1435 list_for_each_entry(d, &r->domains, list) {
1436 if (sep)
1437 seq_putc(s, ';');
1438 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1439 size = 0;
1440 } else {
1441 ctrl = (!is_mba_sc(r) ?
1442 d->ctrl_val[rdtgrp->closid] :
1443 d->mbps_val[rdtgrp->closid]);
1444 if (r->rid == RDT_RESOURCE_MBA)
1445 size = ctrl;
1446 else
1447 size = rdtgroup_cbm_to_size(r, d, ctrl);
1448 }
1449 seq_printf(s, "%d=%u", d->id, size);
1450 sep = true;
1451 }
1452 seq_putc(s, '\n');
1453 }
1454
1455 out:
1456 rdtgroup_kn_unlock(of->kn);
1457
1458 return ret;
1459 }
1460
1461 /* rdtgroup information files for one cache resource. */
1462 static struct rftype res_common_files[] = {
1463 {
1464 .name = "last_cmd_status",
1465 .mode = 0444,
1466 .kf_ops = &rdtgroup_kf_single_ops,
1467 .seq_show = rdt_last_cmd_status_show,
1468 .fflags = RF_TOP_INFO,
1469 },
1470 {
1471 .name = "num_closids",
1472 .mode = 0444,
1473 .kf_ops = &rdtgroup_kf_single_ops,
1474 .seq_show = rdt_num_closids_show,
1475 .fflags = RF_CTRL_INFO,
1476 },
1477 {
1478 .name = "mon_features",
1479 .mode = 0444,
1480 .kf_ops = &rdtgroup_kf_single_ops,
1481 .seq_show = rdt_mon_features_show,
1482 .fflags = RF_MON_INFO,
1483 },
1484 {
1485 .name = "num_rmids",
1486 .mode = 0444,
1487 .kf_ops = &rdtgroup_kf_single_ops,
1488 .seq_show = rdt_num_rmids_show,
1489 .fflags = RF_MON_INFO,
1490 },
1491 {
1492 .name = "cbm_mask",
1493 .mode = 0444,
1494 .kf_ops = &rdtgroup_kf_single_ops,
1495 .seq_show = rdt_default_ctrl_show,
1496 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1497 },
1498 {
1499 .name = "min_cbm_bits",
1500 .mode = 0444,
1501 .kf_ops = &rdtgroup_kf_single_ops,
1502 .seq_show = rdt_min_cbm_bits_show,
1503 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1504 },
1505 {
1506 .name = "shareable_bits",
1507 .mode = 0444,
1508 .kf_ops = &rdtgroup_kf_single_ops,
1509 .seq_show = rdt_shareable_bits_show,
1510 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1511 },
1512 {
1513 .name = "bit_usage",
1514 .mode = 0444,
1515 .kf_ops = &rdtgroup_kf_single_ops,
1516 .seq_show = rdt_bit_usage_show,
1517 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1518 },
1519 {
1520 .name = "min_bandwidth",
1521 .mode = 0444,
1522 .kf_ops = &rdtgroup_kf_single_ops,
1523 .seq_show = rdt_min_bw_show,
1524 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1525 },
1526 {
1527 .name = "bandwidth_gran",
1528 .mode = 0444,
1529 .kf_ops = &rdtgroup_kf_single_ops,
1530 .seq_show = rdt_bw_gran_show,
1531 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1532 },
1533 {
1534 .name = "delay_linear",
1535 .mode = 0444,
1536 .kf_ops = &rdtgroup_kf_single_ops,
1537 .seq_show = rdt_delay_linear_show,
1538 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1539 },
1540 /*
	 * It is platform specific which (if any) capabilities are provided by
1542 * thread_throttle_mode. Defer "fflags" initialization to platform
1543 * discovery.
1544 */
1545 {
1546 .name = "thread_throttle_mode",
1547 .mode = 0444,
1548 .kf_ops = &rdtgroup_kf_single_ops,
1549 .seq_show = rdt_thread_throttle_mode_show,
1550 },
1551 {
1552 .name = "max_threshold_occupancy",
1553 .mode = 0644,
1554 .kf_ops = &rdtgroup_kf_single_ops,
1555 .write = max_threshold_occ_write,
1556 .seq_show = max_threshold_occ_show,
1557 .fflags = RF_MON_INFO | RFTYPE_RES_CACHE,
1558 },
1559 {
1560 .name = "cpus",
1561 .mode = 0644,
1562 .kf_ops = &rdtgroup_kf_single_ops,
1563 .write = rdtgroup_cpus_write,
1564 .seq_show = rdtgroup_cpus_show,
1565 .fflags = RFTYPE_BASE,
1566 },
1567 {
1568 .name = "cpus_list",
1569 .mode = 0644,
1570 .kf_ops = &rdtgroup_kf_single_ops,
1571 .write = rdtgroup_cpus_write,
1572 .seq_show = rdtgroup_cpus_show,
1573 .flags = RFTYPE_FLAGS_CPUS_LIST,
1574 .fflags = RFTYPE_BASE,
1575 },
1576 {
1577 .name = "tasks",
1578 .mode = 0644,
1579 .kf_ops = &rdtgroup_kf_single_ops,
1580 .write = rdtgroup_tasks_write,
1581 .seq_show = rdtgroup_tasks_show,
1582 .fflags = RFTYPE_BASE,
1583 },
1584 {
1585 .name = "schemata",
1586 .mode = 0644,
1587 .kf_ops = &rdtgroup_kf_single_ops,
1588 .write = rdtgroup_schemata_write,
1589 .seq_show = rdtgroup_schemata_show,
1590 .fflags = RF_CTRL_BASE,
1591 },
1592 {
1593 .name = "mode",
1594 .mode = 0644,
1595 .kf_ops = &rdtgroup_kf_single_ops,
1596 .write = rdtgroup_mode_write,
1597 .seq_show = rdtgroup_mode_show,
1598 .fflags = RF_CTRL_BASE,
1599 },
1600 {
1601 .name = "size",
1602 .mode = 0444,
1603 .kf_ops = &rdtgroup_kf_single_ops,
1604 .seq_show = rdtgroup_size_show,
1605 .fflags = RF_CTRL_BASE,
1606 },
1607
1608 };
1609
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
1611 {
1612 struct rftype *rfts, *rft;
1613 int ret, len;
1614
1615 rfts = res_common_files;
1616 len = ARRAY_SIZE(res_common_files);
1617
1618 lockdep_assert_held(&rdtgroup_mutex);
1619
1620 for (rft = rfts; rft < rfts + len; rft++) {
1621 if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
1622 ret = rdtgroup_add_file(kn, rft);
1623 if (ret)
1624 goto error;
1625 }
1626 }
1627
1628 return 0;
1629 error:
1630 pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
1631 while (--rft >= rfts) {
1632 if ((fflags & rft->fflags) == rft->fflags)
1633 kernfs_remove_by_name(kn, rft->name);
1634 }
1635 return ret;
1636 }
1637
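/* Look up a file type in res_common_files[] by name; NULL if not found */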
static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
1639 {
1640 struct rftype *rfts, *rft;
1641 int len;
1642
1643 rfts = res_common_files;
1644 len = ARRAY_SIZE(res_common_files);
1645
1646 for (rft = rfts; rft < rfts + len; rft++) {
1647 if (!strcmp(rft->name, name))
1648 return rft;
1649 }
1650
1651 return NULL;
1652 }
1653
void __init thread_throttle_mode_init(void)
1655 {
1656 struct rftype *rft;
1657
1658 rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
1659 if (!rft)
1660 return;
1661
1662 rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
1663 }
1664
1665 /**
1666 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
1667 * @r: The resource group with which the file is associated.
1668 * @name: Name of the file
1669 *
 * The permissions of the named resctrl file, directory, or link are modified
1671 * to not allow read, write, or execute by any user.
1672 *
1673 * WARNING: This function is intended to communicate to the user that the
1674 * resctrl file has been locked down - that it is not relevant to the
1675 * particular state the system finds itself in. It should not be relied
1676 * on to protect from user access because after the file's permissions
1677 * are restricted the user can still change the permissions using chmod
1678 * from the command line.
1679 *
1680 * Return: 0 on success, <0 on failure.
1681 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
1683 {
1684 struct iattr iattr = {.ia_valid = ATTR_MODE,};
1685 struct kernfs_node *kn;
1686 int ret = 0;
1687
1688 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
1689 if (!kn)
1690 return -ENOENT;
1691
1692 switch (kernfs_type(kn)) {
1693 case KERNFS_DIR:
1694 iattr.ia_mode = S_IFDIR;
1695 break;
1696 case KERNFS_FILE:
1697 iattr.ia_mode = S_IFREG;
1698 break;
1699 case KERNFS_LINK:
1700 iattr.ia_mode = S_IFLNK;
1701 break;
1702 }
1703
1704 ret = kernfs_setattr(kn, &iattr);
1705 kernfs_put(kn);
1706 return ret;
1707 }
1708
1709 /**
1710 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
1711 * @r: The resource group with which the file is associated.
1712 * @name: Name of the file
1713 * @mask: Mask of permissions that should be restored
1714 *
1715 * Restore the permissions of the named file. If @name is a directory the
1716 * permissions of its parent will be used.
1717 *
1718 * Return: 0 on success, <0 on failure.
1719 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
1721 umode_t mask)
1722 {
1723 struct iattr iattr = {.ia_valid = ATTR_MODE,};
1724 struct kernfs_node *kn, *parent;
1725 struct rftype *rfts, *rft;
1726 int ret, len;
1727
1728 rfts = res_common_files;
1729 len = ARRAY_SIZE(res_common_files);
1730
1731 for (rft = rfts; rft < rfts + len; rft++) {
1732 if (!strcmp(rft->name, name))
1733 iattr.ia_mode = rft->mode & mask;
1734 }
1735
1736 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
1737 if (!kn)
1738 return -ENOENT;
1739
1740 switch (kernfs_type(kn)) {
1741 case KERNFS_DIR:
1742 parent = kernfs_get_parent(kn);
1743 if (parent) {
1744 iattr.ia_mode |= parent->mode;
1745 kernfs_put(parent);
1746 }
1747 iattr.ia_mode |= S_IFDIR;
1748 break;
1749 case KERNFS_FILE:
1750 iattr.ia_mode |= S_IFREG;
1751 break;
1752 case KERNFS_LINK:
1753 iattr.ia_mode |= S_IFLNK;
1754 break;
1755 }
1756
1757 ret = kernfs_setattr(kn, &iattr);
1758 kernfs_put(kn);
1759 return ret;
1760 }
1761
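/* Create one info/<name> subdirectory and populate its resource files */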
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
1763 unsigned long fflags)
1764 {
1765 struct kernfs_node *kn_subdir;
1766 int ret;
1767
1768 kn_subdir = kernfs_create_dir(kn_info, name,
1769 kn_info->mode, r);
1770 if (IS_ERR(kn_subdir))
1771 return PTR_ERR(kn_subdir);
1772
1773 ret = rdtgroup_kn_set_ugid(kn_subdir);
1774 if (ret)
1775 return ret;
1776
1777 ret = rdtgroup_add_files(kn_subdir, fflags);
1778 if (!ret)
1779 kernfs_activate(kn_subdir);
1780
1781 return ret;
1782 }
1783
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
1785 {
1786 struct rdt_resource *r;
1787 unsigned long fflags;
1788 char name[32];
1789 int ret;
1790
1791 /* create the directory */
1792 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
1793 if (IS_ERR(kn_info))
1794 return PTR_ERR(kn_info);
1795
1796 ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
1797 if (ret)
1798 goto out_destroy;
1799
1800 for_each_alloc_enabled_rdt_resource(r) {
1801 fflags = r->fflags | RF_CTRL_INFO;
1802 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
1803 if (ret)
1804 goto out_destroy;
1805 }
1806
1807 for_each_mon_enabled_rdt_resource(r) {
1808 fflags = r->fflags | RF_MON_INFO;
1809 sprintf(name, "%s_MON", r->name);
1810 ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
1811 if (ret)
1812 goto out_destroy;
1813 }
1814
1815 ret = rdtgroup_kn_set_ugid(kn_info);
1816 if (ret)
1817 goto out_destroy;
1818
1819 kernfs_activate(kn_info);
1820
1821 return 0;
1822
1823 out_destroy:
1824 kernfs_remove(kn_info);
1825 return ret;
1826 }
1827
1828 static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
1830 char *name, struct kernfs_node **dest_kn)
1831 {
1832 struct kernfs_node *kn;
1833 int ret;
1834
1835 /* create the directory */
1836 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
1837 if (IS_ERR(kn))
1838 return PTR_ERR(kn);
1839
1840 if (dest_kn)
1841 *dest_kn = kn;
1842
1843 ret = rdtgroup_kn_set_ugid(kn);
1844 if (ret)
1845 goto out_destroy;
1846
1847 kernfs_activate(kn);
1848
1849 return 0;
1850
1851 out_destroy:
1852 kernfs_remove(kn);
1853 return ret;
1854 }
1855
static void l3_qos_cfg_update(void *arg)
1857 {
1858 bool *enable = arg;
1859
1860 wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
1861 }
1862
static void l2_qos_cfg_update(void *arg)
1864 {
1865 bool *enable = arg;
1866
1867 wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
1868 }
1869
1870 static inline bool is_mba_linear(void)
1871 {
1872 return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
1873 }
1874
1875 static int set_cache_qos_cfg(int level, bool enable)
1876 {
1877 void (*update)(void *arg);
1878 struct rdt_resource *r_l;
1879 cpumask_var_t cpu_mask;
1880 struct rdt_domain *d;
1881 int cpu;
1882
1883 if (level == RDT_RESOURCE_L3)
1884 update = l3_qos_cfg_update;
1885 else if (level == RDT_RESOURCE_L2)
1886 update = l2_qos_cfg_update;
1887 else
1888 return -EINVAL;
1889
1890 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
1891 return -ENOMEM;
1892
1893 r_l = &rdt_resources_all[level];
1894 list_for_each_entry(d, &r_l->domains, list) {
1895 if (r_l->cache.arch_has_per_cpu_cfg)
1896 /* Pick all the CPUs in the domain instance */
1897 for_each_cpu(cpu, &d->cpu_mask)
1898 cpumask_set_cpu(cpu, cpu_mask);
1899 else
1900 /* Pick one CPU from each domain instance to update MSR */
1901 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
1902 }
1903 cpu = get_cpu();
1904 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
1905 if (cpumask_test_cpu(cpu, cpu_mask))
1906 update(&enable);
1907 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
1908 smp_call_function_many(cpu_mask, update, &enable, 1);
1909 put_cpu();
1910
1911 free_cpumask_var(cpu_mask);
1912
1913 return 0;
1914 }
1915
1916 /* Restore the qos cfg state when a domain comes online */
1917 void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
1918 {
1919 if (!r->alloc_capable)
1920 return;
1921
1922 if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
1923 l2_qos_cfg_update(&r->alloc_enabled);
1924
1925 if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
1926 l3_qos_cfg_update(&r->alloc_enabled);
1927 }
1928
1929 /*
1930 * Enable or disable the MBA software controller
1931 * which helps user specify bandwidth in MBps.
1932 * MBA software controller is supported only if
1933 * MBM is supported and MBA is in linear scale.
1934 */
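/*
 * Example of the user-visible effect: mounting with "-o mba_MBps"
 * enables this controller, after which MBA entries in the schemata
 * file are expressed in MBps instead of the hardware's native delay
 * values.
 */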
1935 static int set_mba_sc(bool mba_sc)
1936 {
1937 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
1938 struct rdt_domain *d;
1939
1940 if (!is_mbm_enabled() || !is_mba_linear() ||
1941 mba_sc == is_mba_sc(r))
1942 return -EINVAL;
1943
1944 r->membw.mba_sc = mba_sc;
1945 list_for_each_entry(d, &r->domains, list)
1946 setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);
1947
1948 return 0;
1949 }
1950
1951 static int cdp_enable(int level, int data_type, int code_type)
1952 {
1953 struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
1954 struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
1955 struct rdt_resource *r_l = &rdt_resources_all[level];
1956 int ret;
1957
1958 if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
1959 !r_lcode->alloc_capable)
1960 return -EINVAL;
1961
1962 ret = set_cache_qos_cfg(level, true);
1963 if (!ret) {
1964 r_l->alloc_enabled = false;
1965 r_ldata->alloc_enabled = true;
1966 r_lcode->alloc_enabled = true;
1967 }
1968 return ret;
1969 }
1970
1971 static int cdpl3_enable(void)
1972 {
1973 return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
1974 RDT_RESOURCE_L3CODE);
1975 }
1976
1977 static int cdpl2_enable(void)
1978 {
1979 return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
1980 RDT_RESOURCE_L2CODE);
1981 }
1982
1983 static void cdp_disable(int level, int data_type, int code_type)
1984 {
1985 struct rdt_resource *r = &rdt_resources_all[level];
1986
1987 r->alloc_enabled = r->alloc_capable;
1988
1989 if (rdt_resources_all[data_type].alloc_enabled) {
1990 rdt_resources_all[data_type].alloc_enabled = false;
1991 rdt_resources_all[code_type].alloc_enabled = false;
1992 set_cache_qos_cfg(level, false);
1993 }
1994 }
1995
1996 static void cdpl3_disable(void)
1997 {
1998 cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
1999 }
2000
2001 static void cdpl2_disable(void)
2002 {
2003 cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
2004 }
2005
2006 static void cdp_disable_all(void)
2007 {
2008 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
2009 cdpl3_disable();
2010 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
2011 cdpl2_disable();
2012 }
2013
2014 /*
2015 * We don't allow rdtgroup directories to be created anywhere
2016 * except the root directory. Thus when looking for the rdtgroup
2017 * structure for a kernfs node we are either looking at a directory,
2018 * in which case the rdtgroup structure is pointed at by the "priv"
2019  * field, or at a file, in which case we need only look to the
2020  * parent directory to find the rdtgroup.
2021 */
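/*
 * For example, the kernfs node for /sys/fs/resctrl/p0 (a directory)
 * carries p0's struct rdtgroup in its "priv" field, while the node for
 * /sys/fs/resctrl/p0/tasks (a file) is resolved through its parent
 * directory's "priv" field. "p0" is only an illustrative group name.
 */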
2022 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
2023 {
2024 if (kernfs_type(kn) == KERNFS_DIR) {
2025 /*
2026 * All the resource directories use "kn->priv"
2027 * to point to the "struct rdtgroup" for the
2028 * resource. "info" and its subdirectories don't
2029 * have rdtgroup structures, so return NULL here.
2030 */
2031 if (kn == kn_info || kn->parent == kn_info)
2032 return NULL;
2033 else
2034 return kn->priv;
2035 } else {
2036 return kn->parent->priv;
2037 }
2038 }
2039
2040 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
2041 {
2042 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
2043
2044 if (!rdtgrp)
2045 return NULL;
2046
2047 atomic_inc(&rdtgrp->waitcount);
2048 kernfs_break_active_protection(kn);
2049
2050 mutex_lock(&rdtgroup_mutex);
2051
2052 /* Was this group deleted while we waited? */
2053 if (rdtgrp->flags & RDT_DELETED)
2054 return NULL;
2055
2056 return rdtgrp;
2057 }
2058
2059 void rdtgroup_kn_unlock(struct kernfs_node *kn)
2060 {
2061 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
2062
2063 if (!rdtgrp)
2064 return;
2065
2066 mutex_unlock(&rdtgroup_mutex);
2067
2068 if (atomic_dec_and_test(&rdtgrp->waitcount) &&
2069 (rdtgrp->flags & RDT_DELETED)) {
2070 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2071 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2072 rdtgroup_pseudo_lock_remove(rdtgrp);
2073 kernfs_unbreak_active_protection(kn);
2074 rdtgroup_remove(rdtgrp);
2075 } else {
2076 kernfs_unbreak_active_protection(kn);
2077 }
2078 }
2079
2080 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2081 struct rdtgroup *prgrp,
2082 struct kernfs_node **mon_data_kn);
2083
2084 static int rdt_enable_ctx(struct rdt_fs_context *ctx)
2085 {
2086 int ret = 0;
2087
2088 if (ctx->enable_cdpl2)
2089 ret = cdpl2_enable();
2090
2091 if (!ret && ctx->enable_cdpl3)
2092 ret = cdpl3_enable();
2093
2094 if (!ret && ctx->enable_mba_mbps)
2095 ret = set_mba_sc(true);
2096
2097 return ret;
2098 }
2099
2100 static int rdt_get_tree(struct fs_context *fc)
2101 {
2102 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2103 struct rdt_domain *dom;
2104 struct rdt_resource *r;
2105 int ret;
2106
2107 cpus_read_lock();
2108 mutex_lock(&rdtgroup_mutex);
2109 /*
2110 * resctrl file system can only be mounted once.
2111 */
2112 if (static_branch_unlikely(&rdt_enable_key)) {
2113 ret = -EBUSY;
2114 goto out;
2115 }
2116
2117 ret = rdt_enable_ctx(ctx);
2118 if (ret < 0)
2119 goto out_cdp;
2120
2121 closid_init();
2122
2123 ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
2124 if (ret < 0)
2125 goto out_mba;
2126
2127 if (rdt_mon_capable) {
2128 ret = mongroup_create_dir(rdtgroup_default.kn,
2129 &rdtgroup_default, "mon_groups",
2130 &kn_mongrp);
2131 if (ret < 0)
2132 goto out_info;
2133
2134 ret = mkdir_mondata_all(rdtgroup_default.kn,
2135 &rdtgroup_default, &kn_mondata);
2136 if (ret < 0)
2137 goto out_mongrp;
2138 rdtgroup_default.mon.mon_data_kn = kn_mondata;
2139 }
2140
2141 ret = rdt_pseudo_lock_init();
2142 if (ret)
2143 goto out_mondata;
2144
2145 ret = kernfs_get_tree(fc);
2146 if (ret < 0)
2147 goto out_psl;
2148
2149 if (rdt_alloc_capable)
2150 static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
2151 if (rdt_mon_capable)
2152 static_branch_enable_cpuslocked(&rdt_mon_enable_key);
2153
2154 if (rdt_alloc_capable || rdt_mon_capable)
2155 static_branch_enable_cpuslocked(&rdt_enable_key);
2156
2157 if (is_mbm_enabled()) {
2158 r = &rdt_resources_all[RDT_RESOURCE_L3];
2159 list_for_each_entry(dom, &r->domains, list)
2160 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
2161 }
2162
2163 goto out;
2164
2165 out_psl:
2166 rdt_pseudo_lock_release();
2167 out_mondata:
2168 if (rdt_mon_capable)
2169 kernfs_remove(kn_mondata);
2170 out_mongrp:
2171 if (rdt_mon_capable)
2172 kernfs_remove(kn_mongrp);
2173 out_info:
2174 kernfs_remove(kn_info);
2175 out_mba:
2176 if (ctx->enable_mba_mbps)
2177 set_mba_sc(false);
2178 out_cdp:
2179 cdp_disable_all();
2180 out:
2181 rdt_last_cmd_clear();
2182 mutex_unlock(&rdtgroup_mutex);
2183 cpus_read_unlock();
2184 return ret;
2185 }
2186
2187 enum rdt_param {
2188 Opt_cdp,
2189 Opt_cdpl2,
2190 Opt_mba_mbps,
2191 nr__rdt_params
2192 };
2193
2194 static const struct fs_parameter_spec rdt_fs_parameters[] = {
2195 fsparam_flag("cdp", Opt_cdp),
2196 fsparam_flag("cdpl2", Opt_cdpl2),
2197 fsparam_flag("mba_MBps", Opt_mba_mbps),
2198 {}
2199 };
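/*
 * Example mount command exercising these options (illustrative only):
 *
 *   mount -t resctrl -o cdp,mba_MBps resctrl /sys/fs/resctrl
 *
 * "cdp" requests L3 code/data prioritization and "mba_MBps" requests
 * the MBA software controller; the mount fails if the hardware lacks
 * the corresponding capability.
 */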
2200
2201 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
2202 {
2203 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2204 struct fs_parse_result result;
2205 int opt;
2206
2207 opt = fs_parse(fc, rdt_fs_parameters, param, &result);
2208 if (opt < 0)
2209 return opt;
2210
2211 switch (opt) {
2212 case Opt_cdp:
2213 ctx->enable_cdpl3 = true;
2214 return 0;
2215 case Opt_cdpl2:
2216 ctx->enable_cdpl2 = true;
2217 return 0;
2218 case Opt_mba_mbps:
2219 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2220 return -EINVAL;
2221 ctx->enable_mba_mbps = true;
2222 return 0;
2223 }
2224
2225 return -EINVAL;
2226 }
2227
2228 static void rdt_fs_context_free(struct fs_context *fc)
2229 {
2230 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2231
2232 kernfs_free_fs_context(fc);
2233 kfree(ctx);
2234 }
2235
2236 static const struct fs_context_operations rdt_fs_context_ops = {
2237 .free = rdt_fs_context_free,
2238 .parse_param = rdt_parse_param,
2239 .get_tree = rdt_get_tree,
2240 };
2241
2242 static int rdt_init_fs_context(struct fs_context *fc)
2243 {
2244 struct rdt_fs_context *ctx;
2245
2246 ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
2247 if (!ctx)
2248 return -ENOMEM;
2249
2250 ctx->kfc.root = rdt_root;
2251 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
2252 fc->fs_private = &ctx->kfc;
2253 fc->ops = &rdt_fs_context_ops;
2254 put_user_ns(fc->user_ns);
2255 fc->user_ns = get_user_ns(&init_user_ns);
2256 fc->global = true;
2257 return 0;
2258 }
2259
2260 static int reset_all_ctrls(struct rdt_resource *r)
2261 {
2262 struct msr_param msr_param;
2263 cpumask_var_t cpu_mask;
2264 struct rdt_domain *d;
2265 int i, cpu;
2266
2267 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
2268 return -ENOMEM;
2269
2270 msr_param.res = r;
2271 msr_param.low = 0;
2272 msr_param.high = r->num_closid;
2273
2274 /*
2275 * Disable resource control for this resource by setting all
2276 * CBMs in all domains to the maximum mask value. Pick one CPU
2277 * from each domain to update the MSRs below.
2278 */
2279 list_for_each_entry(d, &r->domains, list) {
2280 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
2281
2282 for (i = 0; i < r->num_closid; i++)
2283 d->ctrl_val[i] = r->default_ctrl;
2284 }
2285 cpu = get_cpu();
2286 /* Update CBM on this cpu if it's in cpu_mask. */
2287 if (cpumask_test_cpu(cpu, cpu_mask))
2288 rdt_ctrl_update(&msr_param);
2289 /* Update CBM on all other cpus in cpu_mask. */
2290 smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
2291 put_cpu();
2292
2293 free_cpumask_var(cpu_mask);
2294
2295 return 0;
2296 }
2297
2298 /*
2299  * Move tasks from one group to the other. If @from is NULL, then all tasks
2300  * in the system are moved unconditionally (used for teardown).
2301 *
2302 * If @mask is not NULL the cpus on which moved tasks are running are set
2303 * in that mask so the update smp function call is restricted to affected
2304 * cpus.
2305 */
2306 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
2307 struct cpumask *mask)
2308 {
2309 struct task_struct *p, *t;
2310
2311 read_lock(&tasklist_lock);
2312 for_each_process_thread(p, t) {
2313 if (!from || is_closid_match(t, from) ||
2314 is_rmid_match(t, from)) {
2315 WRITE_ONCE(t->closid, to->closid);
2316 WRITE_ONCE(t->rmid, to->mon.rmid);
2317
2318 /*
2319 * Order the closid/rmid stores above before the loads
2320 * in task_curr(). This pairs with the full barrier
2321 * between the rq->curr update and resctrl_sched_in()
2322 * during context switch.
2323 */
2324 smp_mb();
2325
2326 /*
2327 * If the task is on a CPU, set the CPU in the mask.
2328 * The detection is inaccurate as tasks might move or
2329 * schedule before the smp function call takes place.
2330 * In such a case the function call is pointless, but
2331 * there is no other side effect.
2332 */
2333 if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
2334 cpumask_set_cpu(task_cpu(t), mask);
2335 }
2336 }
2337 read_unlock(&tasklist_lock);
2338 }
2339
2340 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
2341 {
2342 struct rdtgroup *sentry, *stmp;
2343 struct list_head *head;
2344
2345 head = &rdtgrp->mon.crdtgrp_list;
2346 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
2347 free_rmid(sentry->mon.rmid);
2348 list_del(&sentry->mon.crdtgrp_list);
2349
2350 if (atomic_read(&sentry->waitcount) != 0)
2351 sentry->flags = RDT_DELETED;
2352 else
2353 rdtgroup_remove(sentry);
2354 }
2355 }
2356
2357 /*
2358  * Forcibly remove all subdirectories under root.
2359 */
2360 static void rmdir_all_sub(void)
2361 {
2362 struct rdtgroup *rdtgrp, *tmp;
2363
2364 /* Move all tasks to the default resource group */
2365 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
2366
2367 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
2368 /* Free any child rmids */
2369 free_all_child_rdtgrp(rdtgrp);
2370
2371 /* Remove each rdtgroup other than root */
2372 if (rdtgrp == &rdtgroup_default)
2373 continue;
2374
2375 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2376 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2377 rdtgroup_pseudo_lock_remove(rdtgrp);
2378
2379 /*
2380 * Give any CPUs back to the default group. We cannot copy
2381 * cpu_online_mask because a CPU might have executed the
2382 * offline callback already, but is still marked online.
2383 */
2384 cpumask_or(&rdtgroup_default.cpu_mask,
2385 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
2386
2387 free_rmid(rdtgrp->mon.rmid);
2388
2389 kernfs_remove(rdtgrp->kn);
2390 list_del(&rdtgrp->rdtgroup_list);
2391
2392 if (atomic_read(&rdtgrp->waitcount) != 0)
2393 rdtgrp->flags = RDT_DELETED;
2394 else
2395 rdtgroup_remove(rdtgrp);
2396 }
2397 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
2398 update_closid_rmid(cpu_online_mask, &rdtgroup_default);
2399
2400 kernfs_remove(kn_info);
2401 kernfs_remove(kn_mongrp);
2402 kernfs_remove(kn_mondata);
2403 }
2404
2405 static void rdt_kill_sb(struct super_block *sb)
2406 {
2407 struct rdt_resource *r;
2408
2409 cpus_read_lock();
2410 mutex_lock(&rdtgroup_mutex);
2411
2412 set_mba_sc(false);
2413
2414 /* Put everything back to default values. */
2415 for_each_alloc_enabled_rdt_resource(r)
2416 reset_all_ctrls(r);
2417 cdp_disable_all();
2418 rmdir_all_sub();
2419 rdt_pseudo_lock_release();
2420 rdtgroup_default.mode = RDT_MODE_SHAREABLE;
2421 static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
2422 static_branch_disable_cpuslocked(&rdt_mon_enable_key);
2423 static_branch_disable_cpuslocked(&rdt_enable_key);
2424 kernfs_kill_sb(sb);
2425 mutex_unlock(&rdtgroup_mutex);
2426 cpus_read_unlock();
2427 }
2428
2429 static struct file_system_type rdt_fs_type = {
2430 .name = "resctrl",
2431 .init_fs_context = rdt_init_fs_context,
2432 .parameters = rdt_fs_parameters,
2433 .kill_sb = rdt_kill_sb,
2434 };
2435
2436 static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
2437 void *priv)
2438 {
2439 struct kernfs_node *kn;
2440 int ret = 0;
2441
2442 kn = __kernfs_create_file(parent_kn, name, 0444,
2443 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
2444 &kf_mondata_ops, priv, NULL, NULL);
2445 if (IS_ERR(kn))
2446 return PTR_ERR(kn);
2447
2448 ret = rdtgroup_kn_set_ugid(kn);
2449 if (ret) {
2450 kernfs_remove(kn);
2451 return ret;
2452 }
2453
2454 return ret;
2455 }
2456
2457 /*
2458 * Remove all subdirectories of mon_data of ctrl_mon groups
2459 * and monitor groups with given domain id.
2460 */
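/*
 * For example, when L3 cache domain 1 goes offline this removes the
 * "mon_L3_01" directory from the mon_data directory of every control
 * and monitor group and of every child monitor group.
 */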
2461 void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
2462 {
2463 struct rdtgroup *prgrp, *crgrp;
2464 char name[32];
2465
2466 if (!r->mon_enabled)
2467 return;
2468
2469 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2470 sprintf(name, "mon_%s_%02d", r->name, dom_id);
2471 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
2472
2473 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
2474 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
2475 }
2476 }
2477
2478 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
2479 struct rdt_domain *d,
2480 struct rdt_resource *r, struct rdtgroup *prgrp)
2481 {
2482 union mon_data_bits priv;
2483 struct kernfs_node *kn;
2484 struct mon_evt *mevt;
2485 struct rmid_read rr;
2486 char name[32];
2487 int ret;
2488
2489 sprintf(name, "mon_%s_%02d", r->name, d->id);
2490 /* create the directory */
2491 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
2492 if (IS_ERR(kn))
2493 return PTR_ERR(kn);
2494
2495 ret = rdtgroup_kn_set_ugid(kn);
2496 if (ret)
2497 goto out_destroy;
2498
2499 if (WARN_ON(list_empty(&r->evt_list))) {
2500 ret = -EPERM;
2501 goto out_destroy;
2502 }
2503
2504 priv.u.rid = r->rid;
2505 priv.u.domid = d->id;
2506 list_for_each_entry(mevt, &r->evt_list, list) {
2507 priv.u.evtid = mevt->evtid;
2508 ret = mon_addfile(kn, mevt->name, priv.priv);
2509 if (ret)
2510 goto out_destroy;
2511
2512 if (is_mbm_event(mevt->evtid))
2513 mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
2514 }
2515 kernfs_activate(kn);
2516 return 0;
2517
2518 out_destroy:
2519 kernfs_remove(kn);
2520 return ret;
2521 }
2522
2523 /*
2524 * Add all subdirectories of mon_data for "ctrl_mon" groups
2525 * and "monitor" groups with given domain id.
2526 */
2527 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
2528 struct rdt_domain *d)
2529 {
2530 struct kernfs_node *parent_kn;
2531 struct rdtgroup *prgrp, *crgrp;
2532 struct list_head *head;
2533
2534 if (!r->mon_enabled)
2535 return;
2536
2537 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2538 parent_kn = prgrp->mon.mon_data_kn;
2539 mkdir_mondata_subdir(parent_kn, d, r, prgrp);
2540
2541 head = &prgrp->mon.crdtgrp_list;
2542 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
2543 parent_kn = crgrp->mon.mon_data_kn;
2544 mkdir_mondata_subdir(parent_kn, d, r, crgrp);
2545 }
2546 }
2547 }
2548
2549 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
2550 struct rdt_resource *r,
2551 struct rdtgroup *prgrp)
2552 {
2553 struct rdt_domain *dom;
2554 int ret;
2555
2556 list_for_each_entry(dom, &r->domains, list) {
2557 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
2558 if (ret)
2559 return ret;
2560 }
2561
2562 return 0;
2563 }
2564
2565 /*
2566 * This creates a directory mon_data which contains the monitored data.
2567 *
2568  * mon_data has one directory for each domain, which is named
2569  * in the format mon_<domain_name>_<domain_id>. For example, a mon_data
2570  * with an L3 domain looks as below:
2571 * ./mon_data:
2572 * mon_L3_00
2573 * mon_L3_01
2574 * mon_L3_02
2575 * ...
2576 *
2577 * Each domain directory has one file per event:
2578 * ./mon_L3_00/:
2579 * llc_occupancy
2580 *
2581 */
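/*
 * When MBM monitoring is enabled, each domain directory also contains
 * the mbm_total_bytes and mbm_local_bytes event files, i.e. one file
 * per event in the resource's evt_list.
 */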
2582 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2583 struct rdtgroup *prgrp,
2584 struct kernfs_node **dest_kn)
2585 {
2586 struct rdt_resource *r;
2587 struct kernfs_node *kn;
2588 int ret;
2589
2590 /*
2591 * Create the mon_data directory first.
2592 */
2593 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
2594 if (ret)
2595 return ret;
2596
2597 if (dest_kn)
2598 *dest_kn = kn;
2599
2600 /*
2601 * Create the subdirectories for each domain. Note that all events
2602 * in a domain like L3 are grouped into a resource whose domain is L3
2603 */
2604 for_each_mon_enabled_rdt_resource(r) {
2605 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
2606 if (ret)
2607 goto out_destroy;
2608 }
2609
2610 return 0;
2611
2612 out_destroy:
2613 kernfs_remove(kn);
2614 return ret;
2615 }
2616
2617 /**
2618 * cbm_ensure_valid - Enforce validity on provided CBM
2619 * @_val: Candidate CBM
2620 * @r: RDT resource to which the CBM belongs
2621 *
2622 * The provided CBM represents all cache portions available for use. This
2623 * may be represented by a bitmap that does not consist of contiguous ones
2624 * and thus be an invalid CBM.
2625 * Here the provided CBM is forced to be a valid CBM by only considering
2626  * the first set of contiguous bits as valid and clearing all other bits.
2627 * The intention here is to provide a valid default CBM with which a new
2628 * resource group is initialized. The user can follow this with a
2629 * modification to the CBM if the default does not satisfy the
2630 * requirements.
2631 */
2632 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
2633 {
2634 unsigned int cbm_len = r->cache.cbm_len;
2635 unsigned long first_bit, zero_bit;
2636 unsigned long val = _val;
2637
2638 if (!val)
2639 return 0;
2640
2641 first_bit = find_first_bit(&val, cbm_len);
2642 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
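/*
 * Example: with cbm_len = 16 and _val = 0xf0f0, first_bit is 4 and
 * zero_bit is 8, so bits 8..15 are cleared below and 0x00f0 is
 * returned.
 */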
2643
2644 /* Clear any remaining bits to ensure contiguous region */
2645 bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
2646 return (u32)val;
2647 }
2648
2649 /*
2650 * Initialize cache resources per RDT domain
2651 *
2652 * Set the RDT domain up to start off with all usable allocations. That is,
2653 * all shareable and unused bits. All-zero CBM is invalid.
2654 */
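/*
 * Illustration with hypothetical values: for cbm_len = 12,
 * shareable_bits = 0xc00 and one existing exclusive group using 0x0f0,
 * used_b ends up as 0xcf0, unused_b as 0x30f and the candidate CBM as
 * 0xf0f; cbm_ensure_valid() then keeps only the first contiguous run,
 * so the new group starts with 0x00f.
 */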
2655 static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
2656 u32 closid)
2657 {
2658 struct rdt_resource *r_cdp = NULL;
2659 struct rdt_domain *d_cdp = NULL;
2660 u32 used_b = 0, unused_b = 0;
2661 unsigned long tmp_cbm;
2662 enum rdtgrp_mode mode;
2663 u32 peer_ctl, *ctrl;
2664 int i;
2665
2666 rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
2667 d->have_new_ctrl = false;
2668 d->new_ctrl = r->cache.shareable_bits;
2669 used_b = r->cache.shareable_bits;
2670 ctrl = d->ctrl_val;
2671 for (i = 0; i < closids_supported(); i++, ctrl++) {
2672 if (closid_allocated(i) && i != closid) {
2673 mode = rdtgroup_mode_by_closid(i);
2674 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
2675 /*
2676 * ctrl values for locksetup aren't relevant
2677 * until the schemata is written, and the mode
2678 * becomes RDT_MODE_PSEUDO_LOCKED.
2679 */
2680 continue;
2681 /*
2682 * If CDP is active include peer domain's
2683 * usage to ensure there is no overlap
2684 * with an exclusive group.
2685 */
2686 if (d_cdp)
2687 peer_ctl = d_cdp->ctrl_val[i];
2688 else
2689 peer_ctl = 0;
2690 used_b |= *ctrl | peer_ctl;
2691 if (mode == RDT_MODE_SHAREABLE)
2692 d->new_ctrl |= *ctrl | peer_ctl;
2693 }
2694 }
2695 if (d->plr && d->plr->cbm > 0)
2696 used_b |= d->plr->cbm;
2697 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
2698 unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
2699 d->new_ctrl |= unused_b;
2700 /*
2701 * Force the initial CBM to be valid, user can
2702 * modify the CBM based on system availability.
2703 */
2704 d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r);
2705 /*
2706 * Assign the u32 CBM to an unsigned long to ensure that
2707 * bitmap_weight() does not access out-of-bound memory.
2708 */
2709 tmp_cbm = d->new_ctrl;
2710 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
2711 rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
2712 return -ENOSPC;
2713 }
2714 d->have_new_ctrl = true;
2715
2716 return 0;
2717 }
2718
2719 /*
2720 * Initialize cache resources with default values.
2721 *
2722 * A new RDT group is being created on an allocation capable (CAT)
2723 * supporting system. Set this group up to start off with all usable
2724 * allocations.
2725 *
2726 * If there are no more shareable bits available on any domain then
2727 * the entire allocation will fail.
2728 */
2729 static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
2730 {
2731 struct rdt_domain *d;
2732 int ret;
2733
2734 list_for_each_entry(d, &r->domains, list) {
2735 ret = __init_one_rdt_domain(d, r, closid);
2736 if (ret < 0)
2737 return ret;
2738 }
2739
2740 return 0;
2741 }
2742
2743 /* Initialize MBA resource with default values. */
2744 static void rdtgroup_init_mba(struct rdt_resource *r)
2745 {
2746 struct rdt_domain *d;
2747
2748 list_for_each_entry(d, &r->domains, list) {
2749 d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
2750 d->have_new_ctrl = true;
2751 }
2752 }
2753
2754 /* Initialize the RDT group's allocations. */
2755 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
2756 {
2757 struct rdt_resource *r;
2758 int ret;
2759
2760 for_each_alloc_enabled_rdt_resource(r) {
2761 if (r->rid == RDT_RESOURCE_MBA) {
2762 rdtgroup_init_mba(r);
2763 } else {
2764 ret = rdtgroup_init_cat(r, rdtgrp->closid);
2765 if (ret < 0)
2766 return ret;
2767 }
2768
2769 ret = update_domains(r, rdtgrp->closid);
2770 if (ret < 0) {
2771 rdt_last_cmd_puts("Failed to initialize allocations\n");
2772 return ret;
2773 }
2774
2775 }
2776
2777 rdtgrp->mode = RDT_MODE_SHAREABLE;
2778
2779 return 0;
2780 }
2781
2782 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
2783 const char *name, umode_t mode,
2784 enum rdt_group_type rtype, struct rdtgroup **r)
2785 {
2786 struct rdtgroup *prdtgrp, *rdtgrp;
2787 struct kernfs_node *kn;
2788 uint files = 0;
2789 int ret;
2790
2791 prdtgrp = rdtgroup_kn_lock_live(parent_kn);
2792 if (!prdtgrp) {
2793 ret = -ENODEV;
2794 goto out_unlock;
2795 }
2796
2797 if (rtype == RDTMON_GROUP &&
2798 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2799 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
2800 ret = -EINVAL;
2801 rdt_last_cmd_puts("Pseudo-locking in progress\n");
2802 goto out_unlock;
2803 }
2804
2805 /* allocate the rdtgroup. */
2806 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
2807 if (!rdtgrp) {
2808 ret = -ENOSPC;
2809 rdt_last_cmd_puts("Kernel out of memory\n");
2810 goto out_unlock;
2811 }
2812 *r = rdtgrp;
2813 rdtgrp->mon.parent = prdtgrp;
2814 rdtgrp->type = rtype;
2815 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
2816
2817 /* kernfs creates the directory for rdtgrp */
2818 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
2819 if (IS_ERR(kn)) {
2820 ret = PTR_ERR(kn);
2821 rdt_last_cmd_puts("kernfs create error\n");
2822 goto out_free_rgrp;
2823 }
2824 rdtgrp->kn = kn;
2825
2826 /*
2827 * kernfs_remove() will drop the reference count on "kn" which
2828 * will free it. But we still need it to stick around for the
2829 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
2830 * which will be dropped by kernfs_put() in rdtgroup_remove().
2831 */
2832 kernfs_get(kn);
2833
2834 ret = rdtgroup_kn_set_ugid(kn);
2835 if (ret) {
2836 rdt_last_cmd_puts("kernfs perm error\n");
2837 goto out_destroy;
2838 }
2839
2840 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
2841 ret = rdtgroup_add_files(kn, files);
2842 if (ret) {
2843 rdt_last_cmd_puts("kernfs fill error\n");
2844 goto out_destroy;
2845 }
2846
2847 if (rdt_mon_capable) {
2848 ret = alloc_rmid();
2849 if (ret < 0) {
2850 rdt_last_cmd_puts("Out of RMIDs\n");
2851 goto out_destroy;
2852 }
2853 rdtgrp->mon.rmid = ret;
2854
2855 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
2856 if (ret) {
2857 rdt_last_cmd_puts("kernfs subdir error\n");
2858 goto out_idfree;
2859 }
2860 }
2861 kernfs_activate(kn);
2862
2863 /*
2864 * The caller unlocks the parent_kn upon success.
2865 */
2866 return 0;
2867
2868 out_idfree:
2869 free_rmid(rdtgrp->mon.rmid);
2870 out_destroy:
2871 kernfs_put(rdtgrp->kn);
2872 kernfs_remove(rdtgrp->kn);
2873 out_free_rgrp:
2874 kfree(rdtgrp);
2875 out_unlock:
2876 rdtgroup_kn_unlock(parent_kn);
2877 return ret;
2878 }
2879
2880 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
2881 {
2882 kernfs_remove(rgrp->kn);
2883 free_rmid(rgrp->mon.rmid);
2884 rdtgroup_remove(rgrp);
2885 }
2886
2887 /*
2888 * Create a monitor group under "mon_groups" directory of a control
2889  * and monitor group (ctrl_mon). This is a resource group
2890 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
2891 */
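/*
 * e.g. mkdir /sys/fs/resctrl/p0/mon_groups/m1 creates monitor group
 * "m1" under control group "p0" (the names are only illustrative).
 */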
2892 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
2893 const char *name, umode_t mode)
2894 {
2895 struct rdtgroup *rdtgrp, *prgrp;
2896 int ret;
2897
2898 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
2899 if (ret)
2900 return ret;
2901
2902 prgrp = rdtgrp->mon.parent;
2903 rdtgrp->closid = prgrp->closid;
2904
2905 /*
2906 * Add the rdtgrp to the list of rdtgrps the parent
2907 * ctrl_mon group has to track.
2908 */
2909 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
2910
2911 rdtgroup_kn_unlock(parent_kn);
2912 return ret;
2913 }
2914
2915 /*
2916 * These are rdtgroups created under the root directory. Can be used
2917 * to allocate and monitor resources.
2918 */
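/*
 * e.g. mkdir /sys/fs/resctrl/p0 creates the ctrl_mon group "p0"
 * directly under the resctrl mount point (the name is illustrative).
 */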
2919 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
2920 const char *name, umode_t mode)
2921 {
2922 struct rdtgroup *rdtgrp;
2923 struct kernfs_node *kn;
2924 u32 closid;
2925 int ret;
2926
2927 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
2928 if (ret)
2929 return ret;
2930
2931 kn = rdtgrp->kn;
2932 ret = closid_alloc();
2933 if (ret < 0) {
2934 rdt_last_cmd_puts("Out of CLOSIDs\n");
2935 goto out_common_fail;
2936 }
2937 closid = ret;
2938 ret = 0;
2939
2940 rdtgrp->closid = closid;
2941 ret = rdtgroup_init_alloc(rdtgrp);
2942 if (ret < 0)
2943 goto out_id_free;
2944
2945 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
2946
2947 if (rdt_mon_capable) {
2948 /*
2949 * Create an empty mon_groups directory to hold the subset
2950 * of tasks and cpus to monitor.
2951 */
2952 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
2953 if (ret) {
2954 rdt_last_cmd_puts("kernfs subdir error\n");
2955 goto out_del_list;
2956 }
2957 }
2958
2959 goto out_unlock;
2960
2961 out_del_list:
2962 list_del(&rdtgrp->rdtgroup_list);
2963 out_id_free:
2964 closid_free(closid);
2965 out_common_fail:
2966 mkdir_rdt_prepare_clean(rdtgrp);
2967 out_unlock:
2968 rdtgroup_kn_unlock(parent_kn);
2969 return ret;
2970 }
2971
2972 /*
2973  * We allow creating mon groups only within a directory called "mon_groups"
2974 * which is present in every ctrl_mon group. Check if this is a valid
2975 * "mon_groups" directory.
2976 *
2977 * 1. The directory should be named "mon_groups".
2978 * 2. The mon group itself should "not" be named "mon_groups".
2979 * This makes sure "mon_groups" directory always has a ctrl_mon group
2980 * as parent.
2981 */
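/*
 * e.g. creating /sys/fs/resctrl/p0/mon_groups/m1 passes both checks,
 * while creating a child literally named "mon_groups" inside that
 * directory fails the second check and is rejected.
 */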
2982 static bool is_mon_groups(struct kernfs_node *kn, const char *name)
2983 {
2984 return (!strcmp(kn->name, "mon_groups") &&
2985 strcmp(name, "mon_groups"));
2986 }
2987
2988 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
2989 umode_t mode)
2990 {
2991 /* Do not accept '\n' to avoid unparsable situation. */
2992 if (strchr(name, '\n'))
2993 return -EINVAL;
2994
2995 /*
2996 * If the parent directory is the root directory and RDT
2997 * allocation is supported, add a control and monitoring
2998 * subdirectory
2999 */
3000 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
3001 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
3002
3003 /*
3004 * If RDT monitoring is supported and the parent directory is a valid
3005 * "mon_groups" directory, add a monitoring subdirectory.
3006 */
3007 if (rdt_mon_capable && is_mon_groups(parent_kn, name))
3008 return rdtgroup_mkdir_mon(parent_kn, name, mode);
3009
3010 return -EPERM;
3011 }
3012
3013 static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
3014 cpumask_var_t tmpmask)
3015 {
3016 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3017 int cpu;
3018
3019 /* Give any tasks back to the parent group */
3020 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
3021
3022 /* Update per cpu rmid of the moved CPUs first */
3023 for_each_cpu(cpu, &rdtgrp->cpu_mask)
3024 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
3025 /*
3026 * Update the MSR on moved CPUs and CPUs which have moved
3027 * task running on them.
3028 */
3029 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3030 update_closid_rmid(tmpmask, NULL);
3031
3032 rdtgrp->flags = RDT_DELETED;
3033 free_rmid(rdtgrp->mon.rmid);
3034
3035 /*
3036 * Remove the rdtgrp from the parent ctrl_mon group's list
3037 */
3038 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
3039 list_del(&rdtgrp->mon.crdtgrp_list);
3040
3041 kernfs_remove(rdtgrp->kn);
3042
3043 return 0;
3044 }
3045
3046 static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
3047 struct rdtgroup *rdtgrp)
3048 {
3049 rdtgrp->flags = RDT_DELETED;
3050 list_del(&rdtgrp->rdtgroup_list);
3051
3052 kernfs_remove(rdtgrp->kn);
3053 return 0;
3054 }
3055
3056 static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
3057 cpumask_var_t tmpmask)
3058 {
3059 int cpu;
3060
3061 /* Give any tasks back to the default group */
3062 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
3063
3064 /* Give any CPUs back to the default group */
3065 cpumask_or(&rdtgroup_default.cpu_mask,
3066 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
3067
3068 /* Update per cpu closid and rmid of the moved CPUs first */
3069 for_each_cpu(cpu, &rdtgrp->cpu_mask) {
3070 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
3071 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
3072 }
3073
3074 /*
3075 * Update the MSR on moved CPUs and CPUs which have moved
3076 * task running on them.
3077 */
3078 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3079 update_closid_rmid(tmpmask, NULL);
3080
3081 closid_free(rdtgrp->closid);
3082 free_rmid(rdtgrp->mon.rmid);
3083
3084 rdtgroup_ctrl_remove(kn, rdtgrp);
3085
3086 /*
3087 * Free all the child monitor group rmids.
3088 */
3089 free_all_child_rdtgrp(rdtgrp);
3090
3091 return 0;
3092 }
3093
3094 static int rdtgroup_rmdir(struct kernfs_node *kn)
3095 {
3096 struct kernfs_node *parent_kn = kn->parent;
3097 struct rdtgroup *rdtgrp;
3098 cpumask_var_t tmpmask;
3099 int ret = 0;
3100
3101 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
3102 return -ENOMEM;
3103
3104 rdtgrp = rdtgroup_kn_lock_live(kn);
3105 if (!rdtgrp) {
3106 ret = -EPERM;
3107 goto out;
3108 }
3109
3110 /*
3111 * If the rdtgroup is a ctrl_mon group and parent directory
3112 * is the root directory, remove the ctrl_mon group.
3113 *
3114 * If the rdtgroup is a mon group and parent directory
3115 * is a valid "mon_groups" directory, remove the mon group.
3116 */
3117 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
3118 rdtgrp != &rdtgroup_default) {
3119 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3120 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
3121 ret = rdtgroup_ctrl_remove(kn, rdtgrp);
3122 } else {
3123 ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
3124 }
3125 } else if (rdtgrp->type == RDTMON_GROUP &&
3126 is_mon_groups(parent_kn, kn->name)) {
3127 ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
3128 } else {
3129 ret = -EPERM;
3130 }
3131
3132 out:
3133 rdtgroup_kn_unlock(kn);
3134 free_cpumask_var(tmpmask);
3135 return ret;
3136 }
3137
3138 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
3139 {
3140 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
3141 seq_puts(seq, ",cdp");
3142
3143 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
3144 seq_puts(seq, ",cdpl2");
3145
3146 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
3147 seq_puts(seq, ",mba_MBps");
3148
3149 return 0;
3150 }
3151
3152 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
3153 .mkdir = rdtgroup_mkdir,
3154 .rmdir = rdtgroup_rmdir,
3155 .show_options = rdtgroup_show_options,
3156 };
3157
3158 static int __init rdtgroup_setup_root(void)
3159 {
3160 int ret;
3161
3162 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
3163 KERNFS_ROOT_CREATE_DEACTIVATED |
3164 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
3165 &rdtgroup_default);
3166 if (IS_ERR(rdt_root))
3167 return PTR_ERR(rdt_root);
3168
3169 mutex_lock(&rdtgroup_mutex);
3170
3171 rdtgroup_default.closid = 0;
3172 rdtgroup_default.mon.rmid = 0;
3173 rdtgroup_default.type = RDTCTRL_GROUP;
3174 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
3175
3176 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
3177
3178 ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
3179 if (ret) {
3180 kernfs_destroy_root(rdt_root);
3181 goto out;
3182 }
3183
3184 rdtgroup_default.kn = rdt_root->kn;
3185 kernfs_activate(rdtgroup_default.kn);
3186
3187 out:
3188 mutex_unlock(&rdtgroup_mutex);
3189
3190 return ret;
3191 }
3192
3193 /*
3194 * rdtgroup_init - rdtgroup initialization
3195 *
3196 * Setup resctrl file system including set up root, create mount point,
3197 * register rdtgroup filesystem, and initialize files under root directory.
3198 *
3199 * Return: 0 on success or -errno
3200 */
3201 int __init rdtgroup_init(void)
3202 {
3203 int ret = 0;
3204
3205 seq_buf_init(&last_cmd_status, last_cmd_status_buf,
3206 sizeof(last_cmd_status_buf));
3207
3208 ret = rdtgroup_setup_root();
3209 if (ret)
3210 return ret;
3211
3212 ret = sysfs_create_mount_point(fs_kobj, "resctrl");
3213 if (ret)
3214 goto cleanup_root;
3215
3216 ret = register_filesystem(&rdt_fs_type);
3217 if (ret)
3218 goto cleanup_mountpoint;
3219
3220 /*
3221 * Adding the resctrl debugfs directory here may not be ideal since
3222 * it would let the resctrl debugfs directory appear on the debugfs
3223 * filesystem before the resctrl filesystem is mounted.
3224 * It may also be ok since that would enable debugging of RDT before
3225 * resctrl is mounted.
3226 * The reason why the debugfs directory is created here and not in
3227 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
3228 * during the debugfs directory creation also &sb->s_type->i_mutex_key
3229 * (the lockdep class of inode->i_rwsem). Other filesystem
3230 * interactions (eg. SyS_getdents) have the lock ordering:
3231 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
3232 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
3233 * is taken, thus creating dependency:
3234 * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause
3235 * issues considering the other two lock dependencies.
3236 * By creating the debugfs directory here we avoid a dependency
3237  * that may cause deadlock (even though file operations cannot
3238  * occur until the filesystem is mounted, I do not know how to
3239  * tell lockdep that).
3240 */
3241 debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
3242
3243 return 0;
3244
3245 cleanup_mountpoint:
3246 sysfs_remove_mount_point(fs_kobj, "resctrl");
3247 cleanup_root:
3248 kernfs_destroy_root(rdt_root);
3249
3250 return ret;
3251 }
3252
3253 void __exit rdtgroup_exit(void)
3254 {
3255 debugfs_remove_recursive(debugfs_resctrl);
3256 unregister_filesystem(&rdt_fs_type);
3257 sysfs_remove_mount_point(fs_kobj, "resctrl");
3258 kernfs_destroy_root(rdt_root);
3259 }
3260