1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
3 #include <linux/io-64-nonatomic-lo-hi.h>
4 #include <linux/device.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/slab.h>
8 #include <linux/idr.h>
9 #include <cxlmem.h>
10 #include <cxl.h>
11 #include "core.h"
12
13 /**
14 * DOC: cxl core
15 *
16 * The CXL core provides a set of interfaces that can be consumed by CXL aware
17 * drivers. The interfaces allow for creation, modification, and destruction of
18 * regions, memory devices, ports, and decoders. CXL aware drivers must register
19 * with the CXL core via these interfaces in order to be able to participate in
20 * cross-device interleave coordination. The CXL core also establishes and
21 * maintains the bridge to the nvdimm subsystem.
22 *
23 * CXL core introduces sysfs hierarchy to control the devices that are
24 * instantiated by the core.
25 */
26
/* Allocator for the unique ids embedded in cxl_port device names */
static DEFINE_IDA(cxl_port_ida);
28
devtype_show(struct device * dev,struct device_attribute * attr,char * buf)29 static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
30 char *buf)
31 {
32 return sysfs_emit(buf, "%s\n", dev->type->name);
33 }
34 static DEVICE_ATTR_RO(devtype);
35
/* Attributes common to every device type registered on the cxl bus */
static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	NULL,
};

/* Non-static: shared with other core compilation units via core.h */
struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};
44
start_show(struct device * dev,struct device_attribute * attr,char * buf)45 static ssize_t start_show(struct device *dev, struct device_attribute *attr,
46 char *buf)
47 {
48 struct cxl_decoder *cxld = to_cxl_decoder(dev);
49
50 return sysfs_emit(buf, "%#llx\n", cxld->range.start);
51 }
52 static DEVICE_ATTR_RO(start);
53
size_show(struct device * dev,struct device_attribute * attr,char * buf)54 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
55 char *buf)
56 {
57 struct cxl_decoder *cxld = to_cxl_decoder(dev);
58
59 return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
60 }
61 static DEVICE_ATTR_RO(size);
62
/*
 * CXL_DECODER_FLAG_ATTR - define a read-only sysfs attribute @name that
 * emits "1" or "0" depending on whether @flag is set in cxld->flags.
 */
#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
								     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

/* Capability and lock-state flags surfaced per decoder */
CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
79
target_type_show(struct device * dev,struct device_attribute * attr,char * buf)80 static ssize_t target_type_show(struct device *dev,
81 struct device_attribute *attr, char *buf)
82 {
83 struct cxl_decoder *cxld = to_cxl_decoder(dev);
84
85 switch (cxld->target_type) {
86 case CXL_DECODER_ACCELERATOR:
87 return sysfs_emit(buf, "accelerator\n");
88 case CXL_DECODER_EXPANDER:
89 return sysfs_emit(buf, "expander\n");
90 }
91 return -ENXIO;
92 }
93 static DEVICE_ATTR_RO(target_type);
94
/* sysfs: emit the comma-separated dport ids in this decoder's target list */
static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t offset = 0;
	int i, rc = 0;

	/* hold the device lock so the target array is stable while read */
	device_lock(dev);
	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxld->target[i];
		struct cxl_dport *next = NULL;

		/* a NULL entry terminates a partially populated list */
		if (!dport)
			break;

		/* peek ahead to decide whether a "," separator is needed */
		if (i + 1 < cxld->interleave_ways)
			next = cxld->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			break;
		offset += rc;
	}
	device_unlock(dev);

	if (rc < 0)
		return rc;

	/* terminate the list with a newline outside the lock */
	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
130
/* Attributes shared by root and switch decoders */
static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};
142
/* Capability attributes only meaningful on platform root decoders */
static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
};

/* Group list bound to cxl_decoder_root_type below */
static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
161
/* target_type only applies to switch (non-root) decoders */
static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

/* Group list bound to cxl_decoder_switch_type below */
static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
177
/*
 * Device-model release for a decoder: return its id to the parent
 * port's ida, then drop the port reference taken at allocation time.
 */
static void cxl_decoder_release(struct device *dev)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);

	ida_free(&port->decoder_ida, cxld->id);
	kfree(cxld);
	/* pairs with get_device() in cxl_decoder_alloc() */
	put_device(&port->dev);
}
187
/* Decoder hosted by a switch/host-bridge port in the decode hierarchy */
static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

/* Decoder hosted by the platform root port (fixed platform decode) */
static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};
199
is_root_decoder(struct device * dev)200 bool is_root_decoder(struct device *dev)
201 {
202 return dev->type == &cxl_decoder_root_type;
203 }
204 EXPORT_SYMBOL_GPL(is_root_decoder);
205
to_cxl_decoder(struct device * dev)206 struct cxl_decoder *to_cxl_decoder(struct device *dev)
207 {
208 if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
209 "not a cxl_decoder device\n"))
210 return NULL;
211 return container_of(dev, struct cxl_decoder, dev);
212 }
213 EXPORT_SYMBOL_GPL(to_cxl_decoder);
214
/*
 * Unlink a dport from its port's list and free it; drops the reference
 * on the backing device taken in cxl_add_dport().
 */
static void cxl_dport_release(struct cxl_dport *dport)
{
	list_del(&dport->list);
	put_device(dport->dport);
	kfree(dport);
}
221
/*
 * Device-model release for a port: tear down all remaining dports,
 * return the port id, and free the port.
 */
static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	struct cxl_dport *dport, *_d;

	/* dport list mutations are serialized by the device lock */
	device_lock(dev);
	list_for_each_entry_safe(dport, _d, &port->dports, list)
		cxl_dport_release(dport);
	device_unlock(dev);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}
234
/* Ports only carry the common base attributes (devtype) */
static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};
245
to_cxl_port(struct device * dev)246 struct cxl_port *to_cxl_port(struct device *dev)
247 {
248 if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
249 "not a cxl_port device\n"))
250 return NULL;
251 return container_of(dev, struct cxl_port, dev);
252 }
253
/*
 * devm teardown for a registered port: remove the dport symlinks
 * created by cxl_add_dport(), then unregister the device.
 */
static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_dport *dport;

	device_lock(&port->dev);
	list_for_each_entry(dport, &port->dports, list) {
		char link_name[CXL_TARGET_STRLEN];

		/*
		 * A truncated name could never have been created by
		 * cxl_add_dport() (it rejects such ids), so skip it.
		 */
		if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
			     dport->port_id) >= CXL_TARGET_STRLEN)
			continue;
		sysfs_remove_link(&port->dev.kobj, link_name);
	}
	device_unlock(&port->dev);
	device_unregister(&port->dev);
}
271
/* devm teardown: drop the "uport" symlink added by devm_cxl_link_uport() */
static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}
278
devm_cxl_link_uport(struct device * host,struct cxl_port * port)279 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
280 {
281 int rc;
282
283 rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
284 if (rc)
285 return rc;
286 return devm_add_action_or_reset(host, cxl_unlink_uport, port);
287 }
288
/*
 * Allocate and initialize (but do not add) a cxl_port.  On success the
 * caller owns an initialized device reference that must be dropped with
 * put_device() on error, or registered via device_add().
 */
static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	/* id feeds the "portN"/"rootN" device name */
	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_port)
		dev->parent = &parent_port->dev;
	else
		dev->parent = uport;

	port->uport = uport;
	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	INIT_LIST_HEAD(&port->dports);

	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	/* id not yet allocated; plain kfree is sufficient here */
	kfree(port);
	return ERR_PTR(rc);
}
334
335 /**
336 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
337 * @host: host device for devm operations
338 * @uport: "physical" device implementing this upstream port
339 * @component_reg_phys: (optional) for configurable cxl_port instances
340 * @parent_port: next hop up in the CXL memory decode hierarchy
341 */
devm_cxl_add_port(struct device * host,struct device * uport,resource_size_t component_reg_phys,struct cxl_port * parent_port)342 struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
343 resource_size_t component_reg_phys,
344 struct cxl_port *parent_port)
345 {
346 struct cxl_port *port;
347 struct device *dev;
348 int rc;
349
350 port = cxl_port_alloc(uport, component_reg_phys, parent_port);
351 if (IS_ERR(port))
352 return port;
353
354 dev = &port->dev;
355 if (parent_port)
356 rc = dev_set_name(dev, "port%d", port->id);
357 else
358 rc = dev_set_name(dev, "root%d", port->id);
359 if (rc)
360 goto err;
361
362 rc = device_add(dev);
363 if (rc)
364 goto err;
365
366 rc = devm_add_action_or_reset(host, unregister_port, port);
367 if (rc)
368 return ERR_PTR(rc);
369
370 rc = devm_cxl_link_uport(host, port);
371 if (rc)
372 return ERR_PTR(rc);
373
374 return port;
375
376 err:
377 put_device(dev);
378 return ERR_PTR(rc);
379 }
380 EXPORT_SYMBOL_GPL(devm_cxl_add_port);
381
find_dport(struct cxl_port * port,int id)382 static struct cxl_dport *find_dport(struct cxl_port *port, int id)
383 {
384 struct cxl_dport *dport;
385
386 device_lock_assert(&port->dev);
387 list_for_each_entry (dport, &port->dports, list)
388 if (dport->port_id == id)
389 return dport;
390 return NULL;
391 }
392
/*
 * Append @new to @port's dport list, enforcing port_id uniqueness.
 * Returns -EEXIST when a dport with the same id is already registered.
 */
static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup)
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
	else
		list_add_tail(&new->list, &port->dports);
	device_unlock(&port->dev);

	return dup ? -EEXIST : 0;
}
410
411 /**
412 * cxl_add_dport - append downstream port data to a cxl_port
413 * @port: the cxl_port that references this dport
414 * @dport_dev: firmware or PCI device representing the dport
415 * @port_id: identifier for this dport in a decoder's target list
416 * @component_reg_phys: optional location of CXL component registers
417 *
418 * Note that all allocations and links are undone by cxl_port deletion
419 * and release.
420 */
cxl_add_dport(struct cxl_port * port,struct device * dport_dev,int port_id,resource_size_t component_reg_phys)421 int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
422 resource_size_t component_reg_phys)
423 {
424 char link_name[CXL_TARGET_STRLEN];
425 struct cxl_dport *dport;
426 int rc;
427
428 if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
429 CXL_TARGET_STRLEN)
430 return -EINVAL;
431
432 dport = kzalloc(sizeof(*dport), GFP_KERNEL);
433 if (!dport)
434 return -ENOMEM;
435
436 INIT_LIST_HEAD(&dport->list);
437 dport->dport = get_device(dport_dev);
438 dport->port_id = port_id;
439 dport->component_reg_phys = component_reg_phys;
440 dport->port = port;
441
442 rc = add_dport(port, dport);
443 if (rc)
444 goto err;
445
446 rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
447 if (rc)
448 goto err;
449
450 return 0;
451 err:
452 cxl_dport_release(dport);
453 return rc;
454 }
455 EXPORT_SYMBOL_GPL(cxl_add_dport);
456
457 static struct cxl_decoder *
cxl_decoder_alloc(struct cxl_port * port,int nr_targets,resource_size_t base,resource_size_t len,int interleave_ways,int interleave_granularity,enum cxl_decoder_type type,unsigned long flags)458 cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
459 resource_size_t len, int interleave_ways,
460 int interleave_granularity, enum cxl_decoder_type type,
461 unsigned long flags)
462 {
463 struct cxl_decoder *cxld;
464 struct device *dev;
465 int rc = 0;
466
467 if (interleave_ways < 1)
468 return ERR_PTR(-EINVAL);
469
470 device_lock(&port->dev);
471 if (list_empty(&port->dports))
472 rc = -EINVAL;
473 device_unlock(&port->dev);
474 if (rc)
475 return ERR_PTR(rc);
476
477 cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
478 if (!cxld)
479 return ERR_PTR(-ENOMEM);
480
481 rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
482 if (rc < 0)
483 goto err;
484
485 /* need parent to stick around to release the id */
486 get_device(&port->dev);
487
488 *cxld = (struct cxl_decoder) {
489 .id = rc,
490 .range = {
491 .start = base,
492 .end = base + len - 1,
493 },
494 .flags = flags,
495 .interleave_ways = interleave_ways,
496 .interleave_granularity = interleave_granularity,
497 .target_type = type,
498 };
499
500 /* handle implied target_list */
501 if (interleave_ways == 1)
502 cxld->target[0] =
503 list_first_entry(&port->dports, struct cxl_dport, list);
504 dev = &cxld->dev;
505 device_initialize(dev);
506 device_set_pm_not_required(dev);
507 dev->parent = &port->dev;
508 dev->bus = &cxl_bus_type;
509
510 /* root ports do not have a cxl_port_type parent */
511 if (port->dev.parent->type == &cxl_port_type)
512 dev->type = &cxl_decoder_switch_type;
513 else
514 dev->type = &cxl_decoder_root_type;
515
516 return cxld;
517 err:
518 kfree(cxld);
519 return ERR_PTR(rc);
520 }
521
/*
 * Allocate, name, and register a decoder on @port, with unregistration
 * tied to @host's devm lifetime.  Returns the decoder or an ERR_PTR().
 */
struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
		     resource_size_t base, resource_size_t len,
		     int interleave_ways, int interleave_granularity,
		     enum cxl_decoder_type type, unsigned long flags)
{
	struct cxl_decoder *cxld;
	struct device *dev;
	int rc;

	cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
				 interleave_granularity, type, flags);
	if (IS_ERR(cxld))
		return cxld;

	dev = &cxld->dev;
	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	/* unregister_cxl_dev (core.h) handles device_unregister() */
	rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
	if (rc)
		return ERR_PTR(rc);
	return cxld;

err:
	/* device not yet added; put_device() releases via cxl_decoder_release() */
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
556
557 /**
558 * __cxl_driver_register - register a driver for the cxl bus
559 * @cxl_drv: cxl driver structure to attach
560 * @owner: owning module/driver
561 * @modname: KBUILD_MODNAME for parent driver
562 */
__cxl_driver_register(struct cxl_driver * cxl_drv,struct module * owner,const char * modname)563 int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
564 const char *modname)
565 {
566 if (!cxl_drv->probe) {
567 pr_debug("%s ->probe() must be specified\n", modname);
568 return -EINVAL;
569 }
570
571 if (!cxl_drv->name) {
572 pr_debug("%s ->name must be specified\n", modname);
573 return -EINVAL;
574 }
575
576 if (!cxl_drv->id) {
577 pr_debug("%s ->id must be specified\n", modname);
578 return -EINVAL;
579 }
580
581 cxl_drv->drv.bus = &cxl_bus_type;
582 cxl_drv->drv.owner = owner;
583 cxl_drv->drv.mod_name = modname;
584 cxl_drv->drv.name = cxl_drv->name;
585
586 return driver_register(&cxl_drv->drv);
587 }
588 EXPORT_SYMBOL_GPL(__cxl_driver_register);
589
/* Counterpart to __cxl_driver_register() */
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(cxl_driver_unregister);
595
/*
 * Map a device to its CXL_DEVICE_* id for driver matching and uevents;
 * 0 means "no driver binds to this device type".
 */
static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	return 0;
}
604
/* Emit a MODALIAS so udev can autoload the matching cxl driver module */
static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}
610
/* Bind drivers to devices by comparing their CXL_DEVICE_* ids */
static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}
615
/* Forward bus probe to the driver's mandatory ->probe() */
static int cxl_bus_probe(struct device *dev)
{
	return to_cxl_drv(dev->driver)->probe(dev);
}
620
/* Forward bus remove to the driver's optional ->remove() */
static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}
628
/* The "cxl" bus; exported so cxl drivers and sibling modules can use it */
struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
};
EXPORT_SYMBOL_GPL(cxl_bus_type);
637
cxl_core_init(void)638 static __init int cxl_core_init(void)
639 {
640 int rc;
641
642 rc = cxl_memdev_init();
643 if (rc)
644 return rc;
645
646 rc = bus_register(&cxl_bus_type);
647 if (rc)
648 goto err;
649 return 0;
650
651 err:
652 cxl_memdev_exit();
653 return rc;
654 }
655
/* Module exit: tear down in reverse order of cxl_core_init() */
static void cxl_core_exit(void)
{
	bus_unregister(&cxl_bus_type);
	cxl_memdev_exit();
}
661
/* Module entry points and license */
module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");
665