// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}
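/*
 * Illustrative sysfs usage (the path assumes a DSA device that was
 * enumerated as dsa0; adjust for the actual device name):
 *
 *   echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *
 * binds engine 0.0 to group 0; writing -1 detaches the engine from its
 * current group.
 */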

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

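/*
 * Recompute the count of read buffers left for general use: the device
 * maximum minus every group's reservation.
 */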
static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}

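/*
 * The attribute names below still use the older "token" terminology;
 * internally the driver tracks the same resource as read buffers
 * (rdbufs).
 */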
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

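/*
 * The two list-producing *_show() helpers below emit their members
 * separated by spaces; decrementing rc before emitting "\n" replaces the
 * final trailing space with the newline.
 */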
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

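/*
 * Traffic class selection for the group. Writes are refused while the
 * device is enabled, and on hardware at or below DEVICE_VERSION_2 they
 * are only honored when the driver's tc_override knob is set. Valid
 * values are 0 through 7.
 */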
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

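/*
 * Switch a disabled wq between dedicated and shared mode. Illustrative
 * example (the path assumes a DSA device enumerated as dsa0):
 *
 *   echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *
 * "shared" is accepted only when the device supports shared wqs.
 */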
static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

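/*
 * Resize the wq. The request is rejected if the new size would push the
 * sum of all wq sizes past the device total, or while the device is
 * enabled.
 */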
static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

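/*
 * The threshold only applies to shared wqs (it is rejected for dedicated
 * ones) and must lie between 1 and the wq size.
 */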
static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

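/*
 * Parse a u64 from a sysfs write, rejecting zero and rounding the value
 * up to the next power of two before returning it.
 */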
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

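/*
 * Read the wq's current occupancy directly from the WQCFG register
 * space; only supported when the hardware advertises the occupancy
 * capability.
 */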
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

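/*
 * Set the device-wide read buffer limit. The write is refused unless the
 * hardware advertises read buffer limit support, and the value may not
 * exceed the total read buffer count reported in the group capabilities.
 */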
static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

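/*
 * The three registration helpers below share a cleanup pattern: on a
 * device_add() failure at index i, devices that were never added (index
 * i and up, including the one that failed) only need their reference
 * dropped with put_device(), while the i devices already added must be
 * torn down with device_unregister().
 */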
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i;	/* number of devices successfully added */
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i;	/* number of devices successfully added */
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i;	/* number of devices successfully added */
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}