/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"

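/*
 * The AFU master character device keeps its cxl_afu pointer in drvdata;
 * this helper recovers it for the master-specific attributes below.
 */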
#define to_afu_chardev_m(d) dev_get_drvdata(d)

/********* Adapter attributes **********************************************/

static ssize_t caia_version_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
                         adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (adapter->user_image_loaded)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t psl_timebase_synced_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}

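/*
 * Writing 1 resets the adapter only if no contexts are attached; writing
 * -1 forces a reset regardless. Any other value is rejected with -EINVAL.
 */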
static ssize_t reset_adapter_store(struct device *device,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || (val != 1 && val != -1))
                return -EINVAL;

        /*
         * Try to take the context-mapping lock, which can only be taken
         * when no contexts are attached to the adapter. Once held, it
         * also prevents any new context from being activated.
         */
        if (val == 1) {
                rc = cxl_adapter_context_lock(adapter);
                if (rc)
                        goto out;

                rc = cxl_ops->adapter_reset(adapter);
                /* If the reset failed, release the context lock */
                if (rc)
                        cxl_adapter_context_unlock(adapter);

        } else if (val == -1) {
                /* Perform a forced adapter reset */
                rc = cxl_ops->adapter_reset(adapter);
        }

out:
        return rc ? rc : count;
}

static ssize_t load_image_on_perst_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (!adapter->perst_loads_image)
                return scnprintf(buf, PAGE_SIZE, "none\n");

        if (adapter->perst_select_user)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

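/*
 * Accepts "none", "user" or "factory" and pushes the selection to the
 * card via cxl_update_image_control().
 */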
static ssize_t load_image_on_perst_store(struct device *device,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;

        if (!strncmp(buf, "none", 4))
                adapter->perst_loads_image = false;
        else if (!strncmp(buf, "user", 4)) {
                adapter->perst_select_user = true;
                adapter->perst_loads_image = true;
        } else if (!strncmp(buf, "factory", 7)) {
                adapter->perst_select_user = false;
                adapter->perst_loads_image = true;
        } else
                return -EINVAL;

        if ((rc = cxl_update_image_control(adapter)))
                return rc;

        return count;
}

static ssize_t perst_reloads_same_image_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}

static ssize_t perst_reloads_same_image_store(struct device *device,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || !(val == 1 || val == 0))
                return -EINVAL;

        adapter->perst_same_image = (val == 1 ? true : false);
        return count;
}

static struct device_attribute adapter_attrs[] = {
        __ATTR_RO(caia_version),
        __ATTR_RO(psl_revision),
        __ATTR_RO(base_image),
        __ATTR_RO(image_loaded),
        __ATTR_RO(psl_timebase_synced),
        __ATTR_RW(load_image_on_perst),
        __ATTR_RW(perst_reloads_same_image),
        __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};


/********* AFU master specific attributes **********************************/

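/*
 * These attributes hang off the AFU master character device rather than
 * the AFU device itself, hence the to_afu_chardev_m() accessor.
 */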
static ssize_t mmio_size_show_master(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
        __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
        __ATTR_RO(pp_mmio_off),
        __ATTR_RO(pp_mmio_len),
};


/********* AFU attributes **************************************************/

static ssize_t mmio_size_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->pp_size)
                return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int rc;

        /* Not safe to reset if it is currently in use */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr)) {
                rc = -EBUSY;
                goto err;
        }

        if ((rc = cxl_ops->afu_reset(afu)))
                goto err;

        rc = count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t irqs_min_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

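/*
 * The requested maximum must be at least the AFU's minimum (pp_irqs) and
 * no more than the adapter-wide limit (bare metal) or the hypervisor's
 * per-AFU limit (guest).
 */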
static ssize_t irqs_max_store(struct device *device,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        ssize_t ret;
        int irqs_max;

        ret = sscanf(buf, "%i", &irqs_max);
        if (ret != 1)
                return -EINVAL;

        if (irqs_max < afu->pp_irqs)
                return -EINVAL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (irqs_max > afu->adapter->user_irqs)
                        return -EINVAL;
        } else {
                /* pHyp sets a per-AFU limit */
                if (irqs_max > afu->guest->max_ints)
                        return -EINVAL;
        }

        afu->irqs_max = irqs_max;
        return count;
}

static ssize_t modes_supported_show(struct device *device,
                struct device_attribute *attr, char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        char *p = buf, *end = buf + PAGE_SIZE;

        if (afu->modes_supported & CXL_MODE_DEDICATED)
                p += scnprintf(p, end - p, "dedicated_process\n");
        if (afu->modes_supported & CXL_MODE_DIRECTED)
                p += scnprintf(p, end - p, "afu_directed\n");
        return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        switch (afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
        case CXL_PREFAULT_ALL:
                return scnprintf(buf, PAGE_SIZE, "all\n");
        default:
                return scnprintf(buf, PAGE_SIZE, "none\n");
        }
}

static ssize_t prefault_mode_store(struct device *device,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        enum prefault_modes mode = -1;

        if (!strncmp(buf, "none", 4))
                mode = CXL_PREFAULT_NONE;
        else {
                if (!radix_enabled()) {

                        /* only allowed when not in radix mode */
                        if (!strncmp(buf, "work_element_descriptor", 23))
                                mode = CXL_PREFAULT_WED;
                        if (!strncmp(buf, "all", 3))
                                mode = CXL_PREFAULT_ALL;
                } else {
                        dev_err(device, "Cannot prefault with radix enabled\n");
                }
        }

        if (mode == -1)
                return -EINVAL;

        afu->prefault_mode = mode;
        return count;
}

static ssize_t mode_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->current_mode == CXL_MODE_DEDICATED)
                return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
        if (afu->current_mode == CXL_MODE_DIRECTED)
                return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
        return scnprintf(buf, PAGE_SIZE, "none\n");
}

static ssize_t mode_store(struct device *device, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int old_mode, mode = -1;
        int rc = -EBUSY;

        /* can't change this if we have a user */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr))
                goto err;

        if (!strncmp(buf, "dedicated_process", 17))
                mode = CXL_MODE_DEDICATED;
        if (!strncmp(buf, "afu_directed", 12))
                mode = CXL_MODE_DIRECTED;
        if (!strncmp(buf, "none", 4))
                mode = 0;

        if (mode == -1) {
                rc = -EINVAL;
                goto err;
        }

        /*
         * afu_deactivate_mode needs to be done outside the lock, so prevent
         * other contexts from coming in before we are ready:
         */
        old_mode = afu->current_mode;
        afu->current_mode = 0;
        afu->num_procs = 0;

        mutex_unlock(&afu->contexts_lock);

        if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
                return rc;
        if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
                return rc;

        return count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t api_version_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
                struct device_attribute *attr,
                char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

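/* Backs the "afu_err_buff" binary attribute created in cxl_sysfs_afu_add() */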
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr, char *buf,
                loff_t off, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));

        return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}

static struct device_attribute afu_attrs[] = {
        __ATTR_RO(mmio_size),
        __ATTR_RO(irqs_min),
        __ATTR_RW(irqs_max),
        __ATTR_RO(modes_supported),
        __ATTR_RW(mode),
        __ATTR_RW(prefault_mode),
        __ATTR_RO(api_version),
        __ATTR_RO(api_version_compatible),
        __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

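/*
 * Create every adapter attribute the backend reports as supported; on
 * failure, remove the ones already created before returning the error.
 */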
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS)) {
                        if ((rc = device_create_file(&adapter->dev, dev_attr)))
                                goto err;
                }
        }
        return 0;
err:
        for (i--; i >= 0; i--) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
        return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
}

struct afu_config_record {
        struct kobject kobj;
        struct bin_attribute config_attr;
        struct list_head list;
        int cr;
        u16 device;
        u16 vendor;
        u32 class;
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)

static ssize_t vendor_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}

static ssize_t device_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}

static ssize_t class_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}

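/*
 * Reads of the raw config record are performed as 8-byte aligned accesses
 * and assembled byte by byte; a failed read shows up as all-ones data.
 */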
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr, char *buf,
                loff_t off, size_t count)
{
        struct afu_config_record *cr = to_cr(kobj);
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));

        u64 i, j, val, rc;

        for (i = 0; i < count;) {
                rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
                if (rc)
                        val = ~0ULL;
                for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
                        buf[i] = (val >> (j * 8)) & 0xff;
        }

        return count;
}

static struct kobj_attribute vendor_attribute =
        __ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
        __ATTR_RO(device);
static struct kobj_attribute class_attribute =
        __ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
        &vendor_attribute.attr,
        &device_attribute.attr,
        &class_attribute.attr,
        NULL,
};

static void release_afu_config_record(struct kobject *kobj)
{
        struct afu_config_record *cr = to_cr(kobj);

        kfree(cr);
}

static struct kobj_type afu_config_record_type = {
        .sysfs_ops = &kobj_sysfs_ops,
        .release = release_afu_config_record,
        .default_attrs = afu_cr_attrs,
};

static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
        struct afu_config_record *cr;
        int rc;

        cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
        if (!cr)
                return ERR_PTR(-ENOMEM);

        cr->cr = cr_idx;

        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
        if (rc)
                goto err;
        cr->class >>= 8;

        /*
         * Export the raw AFU PCIe-like config record. For now this is readable
         * only by root - we can expand that later to be readable by non-root
         * and maybe even writable provided we have a good use-case. Once we
         * support exposing AFUs through a virtual PHB they will get that for
         * free from Linux's PCI infrastructure, but until then it's not clear
         * that we need it for anything, since the main use case is just
         * identifying AFUs, which can be done via the vendor, device and class
         * attributes.
         */
        sysfs_bin_attr_init(&cr->config_attr);
        cr->config_attr.attr.name = "config";
        cr->config_attr.attr.mode = S_IRUSR;
        cr->config_attr.size = afu->crs_len;
        cr->config_attr.read = afu_read_config;

        rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
                                  &afu->dev.kobj, "cr%i", cr->cr);
        if (rc)
                goto err;

        rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
        if (rc)
                goto err1;

        rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
        if (rc)
                goto err2;

        return cr;
err2:
        sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
        kobject_put(&cr->kobj);
        return ERR_PTR(rc);
err:
        kfree(cr);
        return ERR_PTR(rc);
}

void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr, *tmp;
        int i;

        /* remove the err buffer bin attribute */
        if (afu->eb_len)
                device_remove_bin_file(&afu->dev, &afu->attr_eb);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }

        list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
                sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
                kobject_put(&cr->kobj);
        }
}

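/*
 * Create the supported AFU attributes, the optional error buffer binary
 * attribute and one kobject per AFU configuration record.
 */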
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr;
        int i, rc;

        INIT_LIST_HEAD(&afu->crs);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS)) {
                        if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
                                goto err;
                }
        }

        /* conditionally create the binary file for the error info buffer */
        if (afu->eb_len) {
                sysfs_attr_init(&afu->attr_eb.attr);

                afu->attr_eb.attr.name = "afu_err_buff";
                afu->attr_eb.attr.mode = S_IRUGO;
                afu->attr_eb.size = afu->eb_len;
                afu->attr_eb.read = afu_eb_read;

                rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
                if (rc) {
                        dev_err(&afu->dev,
                                "Unable to create eb attr for the afu. Err(%d)\n",
                                rc);
                        goto err;
                }
        }

        for (i = 0; i < afu->crs_num; i++) {
                cr = cxl_sysfs_afu_new_cr(afu, i);
                if (IS_ERR(cr)) {
                        rc = PTR_ERR(cr);
                        goto err1;
                }
                list_add(&cr->list, &afu->crs);
        }

        return 0;

err1:
        cxl_sysfs_afu_remove(afu);
        return rc;
err:
        /* reset eb_len as we haven't created the bin attr */
        afu->eb_len = 0;

        for (i--; i >= 0; i--) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }
        return rc;
}

int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS)) {
                        if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
                                goto err;
                }
        }

        return 0;

err:
        for (i--; i >= 0; i--) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
        return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
}