1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3
4 #include <linux/platform_device.h>
5 #include <linux/genalloc.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/acpi.h>
9 #include <linux/pci.h>
10 #include <linux/mm.h>
11 #include <cxlmem.h>
12 #include "mock.h"
13
/* Knobs sizing the mock CXL topology built by this module */
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8

/* Mock ACPI0017 device plus the multi-bridge topology devices */
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

/* Devices for the separate single-host-bridge topology */
static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

/* Non-static: presumably shared with other cxl_test units — confirm */
struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
38
is_multi_bridge(struct device * dev)39 static inline bool is_multi_bridge(struct device *dev)
40 {
41 int i;
42
43 for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
44 if (&cxl_host_bridge[i]->dev == dev)
45 return true;
46 return false;
47 }
48
is_single_bridge(struct device * dev)49 static inline bool is_single_bridge(struct device *dev)
50 {
51 int i;
52
53 for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
54 if (&cxl_hb_single[i]->dev == dev)
55 return true;
56 return false;
57 }
58
/* Mock ACPI companion for the cxl_acpi platform device (ACPI0017) */
static struct acpi_device acpi0017_mock;
/*
 * One mock ACPI companion per emulated host bridge (two multi-port
 * bridges plus one single-port bridge). Each handle points back at its
 * own entry so find_host_bridge() can match by handle and
 * host_bridge_index() can recover the index by pointer arithmetic.
 */
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
	[2] = {
		.handle = &host_bridge[2],
	},

};
72
is_mock_dev(struct device * dev)73 static bool is_mock_dev(struct device *dev)
74 {
75 int i;
76
77 for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
78 if (dev == &cxl_mem[i]->dev)
79 return true;
80 for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
81 if (dev == &cxl_mem_single[i]->dev)
82 return true;
83 if (dev == &cxl_acpi->dev)
84 return true;
85 return false;
86 }
87
is_mock_adev(struct acpi_device * adev)88 static bool is_mock_adev(struct acpi_device *adev)
89 {
90 int i;
91
92 if (adev == &acpi0017_mock)
93 return true;
94
95 for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
96 if (adev == &host_bridge[i])
97 return true;
98
99 return false;
100 }
101
/*
 * Static mock CEDT: one CHBS per emulated host bridge and five CFMWS
 * windows. Each cfmwsN sub-struct carries its interleave target list
 * inline, matching the variable-length on-the-wire CFMWS layout; the
 * whole aggregate is __packed so the subtables are contiguous as a
 * parser expects.
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	/* One CXL 2.0 CHBS per bridge; uid matches host_bridge[] index */
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			/* all chbs entries share the same size */
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	/* 1-way volatile window targeting bridge 0 */
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* 2-way volatile window interleaved across bridges 0 and 1 */
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* 1-way pmem window targeting bridge 0 */
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* 2-way pmem window interleaved across bridges 0 and 1 */
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* 1-way pmem window for the single-bridge topology (bridge 2) */
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 4,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
};
233
/* Flat index over the five CFMWS entries embedded in mock_cedt */
struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
};
241
/* One mock address-range allocation carved out of cxl_mock_pool */
struct cxl_mock_res {
	struct list_head list;	/* membership in mock_res, under mock_res_lock */
	struct range range;	/* physical address span of the allocation */
};

/* All live mock allocations, protected by mock_res_lock */
static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
/* Pool of fake physical address space backing CHBS and CFMWS ranges */
static struct gen_pool *cxl_mock_pool;
250
depopulate_all_mock_resources(void)251 static void depopulate_all_mock_resources(void)
252 {
253 struct cxl_mock_res *res, *_res;
254
255 mutex_lock(&mock_res_lock);
256 list_for_each_entry_safe(res, _res, &mock_res, list) {
257 gen_pool_free(cxl_mock_pool, res->range.start,
258 range_len(&res->range));
259 list_del(&res->list);
260 kfree(res);
261 }
262 mutex_unlock(&mock_res_lock);
263 }
264
/*
 * Carve a @size range (256M-aligned) out of cxl_mock_pool and track it
 * on the mock_res list for later teardown. Returns NULL on allocation
 * failure.
 *
 * Fixes: the kzalloc() result was dereferenced without a NULL check,
 * and @res leaked when gen_pool_alloc_algo() failed.
 */
static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = SZ_256M,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}
289
populate_cedt(void)290 static int populate_cedt(void)
291 {
292 struct cxl_mock_res *res;
293 int i;
294
295 for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
296 struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
297 resource_size_t size;
298
299 if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
300 size = ACPI_CEDT_CHBS_LENGTH_CXL20;
301 else
302 size = ACPI_CEDT_CHBS_LENGTH_CXL11;
303
304 res = alloc_mock_res(size);
305 if (!res)
306 return -ENOMEM;
307 chbs->base = res->range.start;
308 chbs->length = size;
309 }
310
311 for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
312 struct acpi_cedt_cfmws *window = mock_cfmws[i];
313
314 res = alloc_mock_res(window->window_size);
315 if (!res)
316 return -ENOMEM;
317 window->base_hpa = res->range.start;
318 }
319
320 return 0;
321 }
322
/*
 * WARNING, this hack assumes the format of 'struct
 * cxl_cfmws_context' and 'struct cxl_chbs_context' share the property that
 * the first struct member is the device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;	/* device cxl_acpi is probing */
};
332
/*
 * Substitute for acpi_table_parse_cedt(): when the probing device is the
 * mock cxl_acpi platform device, replay the static mock_cedt subtables
 * of the requested @id through @handler_arg instead of reading firmware
 * tables. Any other device falls through to the real implementation.
 */
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (dev != &cxl_acpi->dev)
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			/* end of entry i == address of the next array slot */
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			/* CFMWS entries are variable length; use header */
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}
362
is_mock_bridge(struct device * dev)363 static bool is_mock_bridge(struct device *dev)
364 {
365 int i;
366
367 for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
368 if (dev == &cxl_host_bridge[i]->dev)
369 return true;
370 for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
371 if (dev == &cxl_hb_single[i]->dev)
372 return true;
373 return false;
374 }
375
is_mock_port(struct device * dev)376 static bool is_mock_port(struct device *dev)
377 {
378 int i;
379
380 if (is_mock_bridge(dev))
381 return true;
382
383 for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
384 if (dev == &cxl_root_port[i]->dev)
385 return true;
386
387 for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
388 if (dev == &cxl_switch_uport[i]->dev)
389 return true;
390
391 for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
392 if (dev == &cxl_switch_dport[i]->dev)
393 return true;
394
395 for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
396 if (dev == &cxl_root_single[i]->dev)
397 return true;
398
399 for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
400 if (dev == &cxl_swu_single[i]->dev)
401 return true;
402
403 for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
404 if (dev == &cxl_swd_single[i]->dev)
405 return true;
406
407 if (is_cxl_memdev(dev))
408 return is_mock_dev(dev->parent);
409
410 return false;
411 }
412
/*
 * Index of @adev within host_bridge[] by pointer arithmetic; @adev must
 * be an element of that array (e.g. from find_host_bridge()).
 */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}
417
/* Look up the mock host bridge acpi_device owning @handle, or NULL */
static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(host_bridge); idx++) {
		if (host_bridge[idx].handle == handle)
			return &host_bridge[idx];
	}

	return NULL;
}
427
/*
 * Substitute for acpi_evaluate_integer(): serve _UID queries against the
 * mock host bridges from the host_bridge[] index; everything else is
 * forwarded to the real ACPI implementation.
 */
static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}
441
442 static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES];
443 static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = {
444 [0] = {
445 .bus = &mock_pci_bus[0],
446 },
447 [1] = {
448 .bus = &mock_pci_bus[1],
449 },
450 };
451
is_mock_bus(struct pci_bus * bus)452 static bool is_mock_bus(struct pci_bus *bus)
453 {
454 int i;
455
456 for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
457 if (bus == &mock_pci_bus[i])
458 return true;
459 return false;
460 }
461
/*
 * Substitute for acpi_pci_find_root(): hand back the mock PCI root for
 * mock host bridge handles, otherwise defer to the real lookup.
 */
static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (adev)
		return &mock_pci_root[host_bridge_index(adev)];

	return acpi_pci_find_root(handle);
}
470
mock_cxl_setup_hdm(struct cxl_port * port)471 static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
472 {
473 struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
474
475 if (!cxlhdm)
476 return ERR_PTR(-ENOMEM);
477
478 cxlhdm->port = port;
479 return cxlhdm;
480 }
481
/*
 * Substitute for devm_cxl_add_passthrough_decoder(): every mock port is
 * expected to have HDM decoders, so a passthrough request is a bug.
 */
static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}
487
488
/* Cursor state for map_targets() while walking a port's children */
struct target_map_ctx {
	int *target_map;	/* output array of dport ids */
	int index;		/* next slot to fill */
	int target_count;	/* capacity of target_map */
};
494
map_targets(struct device * dev,void * data)495 static int map_targets(struct device *dev, void *data)
496 {
497 struct platform_device *pdev = to_platform_device(dev);
498 struct target_map_ctx *ctx = data;
499
500 ctx->target_map[ctx->index++] = pdev->id;
501
502 if (ctx->index > ctx->target_count) {
503 dev_WARN_ONCE(dev, 1, "too many targets found?\n");
504 return -ENXIO;
505 }
506
507 return 0;
508 }
509
mock_decoder_commit(struct cxl_decoder * cxld)510 static int mock_decoder_commit(struct cxl_decoder *cxld)
511 {
512 struct cxl_port *port = to_cxl_port(cxld->dev.parent);
513 int id = cxld->id;
514
515 if (cxld->flags & CXL_DECODER_F_ENABLE)
516 return 0;
517
518 dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
519 if (port->commit_end + 1 != id) {
520 dev_dbg(&port->dev,
521 "%s: out of order commit, expected decoder%d.%d\n",
522 dev_name(&cxld->dev), port->id, port->commit_end + 1);
523 return -EBUSY;
524 }
525
526 port->commit_end++;
527 cxld->flags |= CXL_DECODER_F_ENABLE;
528
529 return 0;
530 }
531
mock_decoder_reset(struct cxl_decoder * cxld)532 static int mock_decoder_reset(struct cxl_decoder *cxld)
533 {
534 struct cxl_port *port = to_cxl_port(cxld->dev.parent);
535 int id = cxld->id;
536
537 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
538 return 0;
539
540 dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
541 if (port->commit_end != id) {
542 dev_dbg(&port->dev,
543 "%s: out of order reset, expected decoder%d.%d\n",
544 dev_name(&cxld->dev), port->id, port->commit_end);
545 return -EBUSY;
546 }
547
548 port->commit_end--;
549 cxld->flags &= ~CXL_DECODER_F_ENABLE;
550
551 return 0;
552 }
553
default_mock_decoder(struct cxl_decoder * cxld)554 static void default_mock_decoder(struct cxl_decoder *cxld)
555 {
556 cxld->hpa_range = (struct range){
557 .start = 0,
558 .end = -1,
559 };
560
561 cxld->interleave_ways = 1;
562 cxld->interleave_granularity = 256;
563 cxld->target_type = CXL_DECODER_EXPANDER;
564 cxld->commit = mock_decoder_commit;
565 cxld->reset = mock_decoder_reset;
566 }
567
first_decoder(struct device * dev,void * data)568 static int first_decoder(struct device *dev, void *data)
569 {
570 struct cxl_decoder *cxld;
571
572 if (!is_switch_decoder(dev))
573 return 0;
574 cxld = to_cxl_decoder(dev);
575 if (cxld->id == 0)
576 return 1;
577 return 0;
578 }
579
/*
 * Emulate BIOS-established decoder state for @cxld. Endpoint decoder 0
 * on cxl_mem.0 and cxl_mem.4 (both under host-bridge0) gets a static
 * 512M region mapped out of cfmws0, and the switch/root decoders above
 * them are programmed to match; every other decoder is initialized
 * disabled via default_mock_decoder().
 */
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check whether this endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		/* reset to the endpoint's own port after the walk */
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mock a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	/* Program the endpoint decoder as actively decoding cfmws0 */
	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;
		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4)
				cxlsd->target[1] = dport;
			else
				cxlsd->target[0] = dport;
		} else
			cxlsd->target[0] = dport;
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_EXPANDER;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		/* drop the reference taken by device_find_child() */
		put_device(dev);
	}
}
689
/*
 * Substitute for devm_cxl_enumerate_decoders(): populate @cxlhdm's port
 * with NR_CXL_PORT_DECODERS decoders. Endpoint ports get endpoint
 * decoders (no targets); switch/root-attached ports get switch decoders
 * whose target lists are filled from the port's child platform devices.
 * Returns 0 on success or a negative errno.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	/* number of downstream targets depends on where this port sits */
	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		/* apply mock "BIOS" state (may leave decoder disabled) */
		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
761
/*
 * Substitute for devm_cxl_port_enumerate_dports(): pick the platform
 * device array that models @port's downstream ports (root ports at
 * depth 1, switch dports at depth 2, selected per topology via the
 * bridge type) and register each array member whose parent matches
 * @port's uport as a dport. Returns 0 on success or a negative errno.
 */
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		/* skip entries belonging to a different uport */
		if (pdev->dev.parent != port->uport) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport)) {
			dev_err(dev, "failed to add dport: %s (%ld)\n",
				dev_name(&pdev->dev), PTR_ERR(dport));
			return PTR_ERR(dport);
		}

		dev_dbg(dev, "add dport%d: %s\n", pdev->id,
			dev_name(&pdev->dev));
	}

	return 0;
}
826
/* Override table registered with the cxl_test mock infrastructure */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
842
/* Wire @adev up as the ACPI companion fwnode of platform device @dev */
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}
850
/* Fallbacks for architectures whose sizes.h lacks these constants */
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif
858
cxl_single_init(void)859 static __init int cxl_single_init(void)
860 {
861 int i, rc;
862
863 for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
864 struct acpi_device *adev =
865 &host_bridge[NR_CXL_HOST_BRIDGES + i];
866 struct platform_device *pdev;
867
868 pdev = platform_device_alloc("cxl_host_bridge",
869 NR_CXL_HOST_BRIDGES + i);
870 if (!pdev)
871 goto err_bridge;
872
873 mock_companion(adev, &pdev->dev);
874 rc = platform_device_add(pdev);
875 if (rc) {
876 platform_device_put(pdev);
877 goto err_bridge;
878 }
879
880 cxl_hb_single[i] = pdev;
881 rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
882 "physical_node");
883 if (rc)
884 goto err_bridge;
885 }
886
887 for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
888 struct platform_device *bridge =
889 cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
890 struct platform_device *pdev;
891
892 pdev = platform_device_alloc("cxl_root_port",
893 NR_MULTI_ROOT + i);
894 if (!pdev)
895 goto err_port;
896 pdev->dev.parent = &bridge->dev;
897
898 rc = platform_device_add(pdev);
899 if (rc) {
900 platform_device_put(pdev);
901 goto err_port;
902 }
903 cxl_root_single[i] = pdev;
904 }
905
906 for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
907 struct platform_device *root_port = cxl_root_single[i];
908 struct platform_device *pdev;
909
910 pdev = platform_device_alloc("cxl_switch_uport",
911 NR_MULTI_ROOT + i);
912 if (!pdev)
913 goto err_uport;
914 pdev->dev.parent = &root_port->dev;
915
916 rc = platform_device_add(pdev);
917 if (rc) {
918 platform_device_put(pdev);
919 goto err_uport;
920 }
921 cxl_swu_single[i] = pdev;
922 }
923
924 for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
925 struct platform_device *uport =
926 cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
927 struct platform_device *pdev;
928
929 pdev = platform_device_alloc("cxl_switch_dport",
930 i + NR_MEM_MULTI);
931 if (!pdev)
932 goto err_dport;
933 pdev->dev.parent = &uport->dev;
934
935 rc = platform_device_add(pdev);
936 if (rc) {
937 platform_device_put(pdev);
938 goto err_dport;
939 }
940 cxl_swd_single[i] = pdev;
941 }
942
943 for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
944 struct platform_device *dport = cxl_swd_single[i];
945 struct platform_device *pdev;
946
947 pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
948 if (!pdev)
949 goto err_mem;
950 pdev->dev.parent = &dport->dev;
951 set_dev_node(&pdev->dev, i % 2);
952
953 rc = platform_device_add(pdev);
954 if (rc) {
955 platform_device_put(pdev);
956 goto err_mem;
957 }
958 cxl_mem_single[i] = pdev;
959 }
960
961 return 0;
962
963 err_mem:
964 for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
965 platform_device_unregister(cxl_mem_single[i]);
966 err_dport:
967 for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
968 platform_device_unregister(cxl_swd_single[i]);
969 err_uport:
970 for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
971 platform_device_unregister(cxl_swu_single[i]);
972 err_port:
973 for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
974 platform_device_unregister(cxl_root_single[i]);
975 err_bridge:
976 for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
977 struct platform_device *pdev = cxl_hb_single[i];
978
979 if (!pdev)
980 continue;
981 sysfs_remove_link(&pdev->dev.kobj, "physical_node");
982 platform_device_unregister(cxl_hb_single[i]);
983 }
984
985 return rc;
986 }
987
/*
 * Tear down the single-host-bridge topology in reverse creation order.
 * platform_device_unregister() tolerates the NULL entries left by a
 * partial cxl_single_init().
 */
static void cxl_single_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}
1009
cxl_test_init(void)1010 static __init int cxl_test_init(void)
1011 {
1012 int rc, i;
1013
1014 register_cxl_mock_ops(&cxl_mock_ops);
1015
1016 cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
1017 if (!cxl_mock_pool) {
1018 rc = -ENOMEM;
1019 goto err_gen_pool_create;
1020 }
1021
1022 rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
1023 SZ_64G, NUMA_NO_NODE);
1024 if (rc)
1025 goto err_gen_pool_add;
1026
1027 rc = populate_cedt();
1028 if (rc)
1029 goto err_populate;
1030
1031 for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
1032 struct acpi_device *adev = &host_bridge[i];
1033 struct platform_device *pdev;
1034
1035 pdev = platform_device_alloc("cxl_host_bridge", i);
1036 if (!pdev)
1037 goto err_bridge;
1038
1039 mock_companion(adev, &pdev->dev);
1040 rc = platform_device_add(pdev);
1041 if (rc) {
1042 platform_device_put(pdev);
1043 goto err_bridge;
1044 }
1045
1046 cxl_host_bridge[i] = pdev;
1047 rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1048 "physical_node");
1049 if (rc)
1050 goto err_bridge;
1051 }
1052
1053 for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
1054 struct platform_device *bridge =
1055 cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
1056 struct platform_device *pdev;
1057
1058 pdev = platform_device_alloc("cxl_root_port", i);
1059 if (!pdev)
1060 goto err_port;
1061 pdev->dev.parent = &bridge->dev;
1062
1063 rc = platform_device_add(pdev);
1064 if (rc) {
1065 platform_device_put(pdev);
1066 goto err_port;
1067 }
1068 cxl_root_port[i] = pdev;
1069 }
1070
1071 BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
1072 for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
1073 struct platform_device *root_port = cxl_root_port[i];
1074 struct platform_device *pdev;
1075
1076 pdev = platform_device_alloc("cxl_switch_uport", i);
1077 if (!pdev)
1078 goto err_uport;
1079 pdev->dev.parent = &root_port->dev;
1080
1081 rc = platform_device_add(pdev);
1082 if (rc) {
1083 platform_device_put(pdev);
1084 goto err_uport;
1085 }
1086 cxl_switch_uport[i] = pdev;
1087 }
1088
1089 for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
1090 struct platform_device *uport =
1091 cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
1092 struct platform_device *pdev;
1093
1094 pdev = platform_device_alloc("cxl_switch_dport", i);
1095 if (!pdev)
1096 goto err_dport;
1097 pdev->dev.parent = &uport->dev;
1098
1099 rc = platform_device_add(pdev);
1100 if (rc) {
1101 platform_device_put(pdev);
1102 goto err_dport;
1103 }
1104 cxl_switch_dport[i] = pdev;
1105 }
1106
1107 for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
1108 struct platform_device *dport = cxl_switch_dport[i];
1109 struct platform_device *pdev;
1110
1111 pdev = platform_device_alloc("cxl_mem", i);
1112 if (!pdev)
1113 goto err_mem;
1114 pdev->dev.parent = &dport->dev;
1115 set_dev_node(&pdev->dev, i % 2);
1116
1117 rc = platform_device_add(pdev);
1118 if (rc) {
1119 platform_device_put(pdev);
1120 goto err_mem;
1121 }
1122 cxl_mem[i] = pdev;
1123 }
1124
1125 rc = cxl_single_init();
1126 if (rc)
1127 goto err_mem;
1128
1129 cxl_acpi = platform_device_alloc("cxl_acpi", 0);
1130 if (!cxl_acpi)
1131 goto err_single;
1132
1133 mock_companion(&acpi0017_mock, &cxl_acpi->dev);
1134 acpi0017_mock.dev.bus = &platform_bus_type;
1135
1136 rc = platform_device_add(cxl_acpi);
1137 if (rc)
1138 goto err_add;
1139
1140 return 0;
1141
1142 err_add:
1143 platform_device_put(cxl_acpi);
1144 err_single:
1145 cxl_single_exit();
1146 err_mem:
1147 for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1148 platform_device_unregister(cxl_mem[i]);
1149 err_dport:
1150 for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1151 platform_device_unregister(cxl_switch_dport[i]);
1152 err_uport:
1153 for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1154 platform_device_unregister(cxl_switch_uport[i]);
1155 err_port:
1156 for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1157 platform_device_unregister(cxl_root_port[i]);
1158 err_bridge:
1159 for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1160 struct platform_device *pdev = cxl_host_bridge[i];
1161
1162 if (!pdev)
1163 continue;
1164 sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1165 platform_device_unregister(cxl_host_bridge[i]);
1166 }
1167 err_populate:
1168 depopulate_all_mock_resources();
1169 err_gen_pool_add:
1170 gen_pool_destroy(cxl_mock_pool);
1171 err_gen_pool_create:
1172 unregister_cxl_mock_ops(&cxl_mock_ops);
1173 return rc;
1174 }
1175
/*
 * Module exit: unwind everything cxl_test_init() created, in reverse
 * order — cxl_acpi first, then the single topology, then the
 * multi-bridge topology, then the mock CEDT resources and pool, and
 * finally the mock ops registration.
 */
static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	cxl_single_exit();
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}
1202
1203 module_init(cxl_test_init);
1204 module_exit(cxl_test_exit);
1205 MODULE_LICENSE("GPL v2");
1206 MODULE_IMPORT_NS(ACPI);
1207 MODULE_IMPORT_NS(CXL);
1208