// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 */

#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/debugfs.h>
#include <asm/powernv.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>

#include "pci.h"

static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
					   pdn->busno, pdn->devfn);

	/*
	 * pci_get_domain_bus_and_slot() increased the reference count of
	 * the PCI device, but callers don't actually need that reference
	 * as the PE already holds one. Since callers aren't aware of the
	 * reference count change, call pci_dev_put() now to avoid leaks.
	 */
	if (pdev)
		pci_dev_put(pdev);

	return pdev;
}

/* Given an NPU device get the associated GPU PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get the associated GPU device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get the associated NPU device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
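
/*
 * The two helpers above walk firmware-provided phandles that link a GPU
 * and its NPU "emulated PCI" link devices both ways. A rough sketch of
 * the device-tree shape they assume (node names are illustrative only):
 *
 *	gpu: gpu@0 {		// real GPU PCI device
 *		ibm,npu = <&link0 &link1>;
 *	};
 *	link0: npu-link@0 {	// NPU link device
 *		ibm,gpu = <&gpu>;
 *	};
 */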

#ifdef CONFIG_IOMMU_API
/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked GPU device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

static long pnv_npu_unset_window(struct iommu_table_group *table_group,
		int num);

static long pnv_npu_set_window(struct iommu_table_group *table_group, int num,
		struct iommu_table *tbl)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;
	int num2 = (num == 0) ? 1 : 0;

	/* NPU has just one TVE so if there is another table, remove it first */
	if (npe->table_group.tables[num2])
		pnv_npu_unset_window(&npe->table_group, num2);

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
		start_addr, start_addr + win_size - 1,
		IOMMU_PAGE_SIZE(tbl));

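	/*
	 * Note: the table size handed to OPAL below is in bytes; each TCE
	 * entry is 8 bytes wide, hence the "size << 3". For multi-level
	 * tables only the top level's size is passed, together with the
	 * number of levels.
	 */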
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	if (!npe->table_group.tables[num])
		return 0;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
static void pnv_npu_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	struct pci_dev *gpdev = NULL;

	/*
	 * Note: the NPU has just a single TVE in the hardware, which means
	 * that while in use by the kernel it can have either the 32bit window
	 * or DMA bypass enabled, but never both. So we deconfigure the 32bit
	 * window only if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(&npe->table_group, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_unmap_lpar_dev(gpdev);
}

static void pnv_npu_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pci_dev *gpdev = NULL;

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
}

static struct iommu_table_group_ops pnv_pci_npu_ops = {
	.set_window = pnv_npu_set_window,
	.unset_window = pnv_npu_unset_window,
	.take_ownership = pnv_npu_take_ownership,
	.release_ownership = pnv_npu_release_ownership,
};
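
/*
 * These callbacks are not called directly from this file: they are invoked
 * through the generic iommu_table_group machinery, e.g. the VFIO SPAPR TCE
 * driver calls take_ownership() when a group is attached to a container
 * and release_ownership() when it is detached again.
 */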
#endif /* CONFIG_IOMMU_API */

/*
 * NPU2 ATS
 */
/* Maximum possible number of ATSD MMIO registers per NPU */
#define NV_NMMU_ATSD_REGS	8
#define NV_NPU_MAX_PE_NUM	16

/*
 * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or
 * up to 3 x (GPU + 2xNPUs) (POWER9).
 */
struct npu_comp {
	struct iommu_table_group table_group;
	int pe_num;
	struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM];
};

/* An NPU descriptor, valid for POWER9 only */
struct npu {
	int index;
	struct npu_comp npucomp;
};

#ifdef CONFIG_IOMMU_API
static long pnv_npu_peers_create_table_userspace(
		struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	if (!npucomp->pe_num || !npucomp->pe[0] ||
			!npucomp->pe[0]->table_group.ops ||
			!npucomp->pe[0]->table_group.ops->create_table)
		return -EFAULT;

	return npucomp->pe[0]->table_group.ops->create_table(
			&npucomp->pe[0]->table_group, num, page_shift,
			window_size, levels, ptbl);
}

static long pnv_npu_peers_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->set_window)
			continue;

		ret = pe->table_group.ops->set_window(&pe->table_group,
				num, tbl);
		if (ret)
			break;
	}

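	/* On failure, roll back the windows already set on earlier peers */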
	if (ret) {
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!pe->table_group.ops->unset_window)
				continue;

			ret = pe->table_group.ops->unset_window(
					&pe->table_group, num);
			if (ret)
				break;
		}
	} else {
		table_group->tables[num] = iommu_tce_table_get(tbl);
	}

	return ret;
}

static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group,
		int num)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		WARN_ON(npucomp->table_group.tables[num] !=
				table_group->tables[num]);
		if (!npucomp->table_group.tables[num])
			continue;

		if (!pe->table_group.ops->unset_window)
			continue;

		ret = pe->table_group.ops->unset_window(&pe->table_group, num);
		if (ret)
			break;
	}

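	/* On failure, restore the windows already removed from earlier peers */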
	if (ret) {
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!npucomp->table_group.tables[num])
				continue;

			if (!pe->table_group.ops->set_window)
				continue;

			ret = pe->table_group.ops->set_window(&pe->table_group,
					num, table_group->tables[num]);
			if (ret)
				break;
		}
	} else if (table_group->tables[num]) {
		iommu_tce_table_put(table_group->tables[num]);
		table_group->tables[num] = NULL;
	}

	return ret;
}

static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops ||
		    !pe->table_group.ops->take_ownership)
			continue;
		pe->table_group.ops->take_ownership(&pe->table_group);
	}
}

static void pnv_npu_peers_release_ownership(
		struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops ||
		    !pe->table_group.ops->release_ownership)
			continue;
		pe->table_group.ops->release_ownership(&pe->table_group);
	}
}

static struct iommu_table_group_ops pnv_npu_peers_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_npu_peers_create_table_userspace,
	.set_window = pnv_npu_peers_set_window,
	.unset_window = pnv_npu_peers_unset_window,
	.take_ownership = pnv_npu_peers_take_ownership,
	.release_ownership = pnv_npu_peers_release_ownership,
};
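
/*
 * Note on the compound ops above: create_table() delegates to the first
 * GPU PE in the group, while set_window()/unset_window() fan the request
 * out to every peer PE so that the GPU and all of its NPU links end up
 * sharing the same TCE table.
 */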

static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
		struct pnv_ioda_pe *pe)
{
	if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM))
		return;

	npucomp->pe[npucomp->pe_num] = pe;
	++npucomp->pe_num;
}

static struct iommu_table_group *
pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *compound_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_controller *hose;
	struct pci_dev *npdev = NULL;

	list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) {
		npdev = pnv_pci_get_npu_dev(gpdev, 0);
		if (npdev)
			break;
	}

	if (!npdev)
		/* It is not an NPU-attached device, skip */
		return NULL;

	hose = pci_bus_to_host(npdev->bus);

	if (hose->npu) {
		/* P9 case: compound group is per-NPU (all GPUs, all links) */
		npucomp = &hose->npu->npucomp;
	} else {
		/* P8 case: compound group is per-GPU (1 GPU, 2 links) */
		npucomp = pe->npucomp = kzalloc(sizeof(*npucomp), GFP_KERNEL);
		if (!npucomp)
			return NULL;
	}

	compound_group = &npucomp->table_group;
	if (!compound_group->group) {
		compound_group->ops = &pnv_npu_peers_ops;
		iommu_register_group(compound_group, hose->global_number,
				pe->pe_number);

		/* Steal capabilities from a GPU PE */
		compound_group->max_dynamic_windows_supported =
			pe->table_group.max_dynamic_windows_supported;
		compound_group->tce32_start = pe->table_group.tce32_start;
		compound_group->tce32_size = pe->table_group.tce32_size;
		compound_group->max_levels = pe->table_group.max_levels;
		if (!compound_group->pgsizes)
			compound_group->pgsizes = pe->table_group.pgsizes;
	}

	/*
	 * The GPU would have been added to the iommu group that's created
	 * for the PE. Pull it out now.
	 */
	iommu_del_device(&gpdev->dev);

	/*
	 * I'm not sure this is strictly required, but it's probably a good
	 * idea since the table_group for the PE is going to be attached to
	 * the compound table group. If we leave the PE's iommu group active
	 * then we might have the same table_group being modifiable via two
	 * separate iommu groups.
	 */
	iommu_group_put(pe->table_group.group);

	/* Now put the GPU into the compound group */
	pnv_comp_attach_table_group(npucomp, pe);
	iommu_add_device(compound_group, &gpdev->dev);

	return compound_group;
}

static struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *table_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_dev *npdev;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev);

	WARN_ON(!(pe->flags & PNV_IODA_PE_DEV));
	if (!gpe)
		return NULL;

	/*
	 * IODA2 bridges get this set up from pci_controller_ops::setup_bridge
	 * but NPU bridges do not have this hook defined so we do it here.
	 * We do not set up other table group parameters as they won't be used
	 * anyway - NVLink bridges are subordinate PEs.
	 */
	pe->table_group.ops = &pnv_pci_npu_ops;

	table_group = iommu_group_get_iommudata(
			iommu_group_get(&gpdev->dev));

	/*
	 * On P9 the NPU PHB and the PCI PHB support different page sizes,
	 * so keep only the sizes common to both. We expect here that the
	 * NVLink bridge PE pgsizes have been initialized by the caller.
	 */
	table_group->pgsizes &= pe->table_group.pgsizes;
	npucomp = container_of(table_group, struct npu_comp, table_group);
	pnv_comp_attach_table_group(npucomp, pe);

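	/*
	 * Walk the NPU PHB's root bus and add every link device backing
	 * this GPU to the GPU's compound IOMMU group.
	 */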
	list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) {
		struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev);

		if (gpdevtmp != gpdev)
			continue;

		iommu_add_device(table_group, &npdev->dev);
	}

	return table_group;
}

void pnv_pci_npu_setup_iommu_groups(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	/*
	 * For non-NVLink devices the IOMMU group is registered when the PE is
	 * configured and devices are added to the group when the per-device
	 * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is
	 * only initialised for "normal" IODA PHBs.
	 *
	 * For NVLink devices we need to ensure the NVLinks and the GPU end up
	 * in the same IOMMU group, so that's handled here.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->type == PNV_PHB_IODA2)
			list_for_each_entry(pe, &phb->ioda.pe_list, list)
				pnv_try_setup_npu_table_group(pe);
	}

	/*
	 * Now that all PHBs are discovered, it is time to add NPU devices to
	 * the corresponding IOMMU groups.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		unsigned long pgsizes;

		phb = hose->private_data;

		if (phb->type != PNV_PHB_NPU_NVLINK)
			continue;

		pgsizes = pnv_ioda_parse_tce_sizes(phb);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			/*
			 * IODA2 bridges get this set up from
			 * pci_controller_ops::setup_bridge but NPU bridges
			 * do not have this hook defined so we do it here.
			 */
			pe->table_group.pgsizes = pgsizes;
			pnv_npu_compound_attach(pe);
		}
	}
}
#endif /* CONFIG_IOMMU_API */

int pnv_npu2_init(struct pci_controller *hose)
{
	static int npu_index;
	struct npu *npu;
	int ret;

	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
	if (!npu)
		return -ENOMEM;

	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
		ret = -ENOSPC;
		goto fail_exit;
	}
	npu->index = npu_index;
	hose->npu = npu;

	return 0;

fail_exit:
	kfree(npu);
	return ret;
}

int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
		unsigned long msr)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	if (hose->npu == NULL) {
		dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
		return 0;
	}

	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
			nphb->opal_id, lparid);
	/*
	 * Currently we only support radix, and a non-zero LPCR only makes
	 * sense for hash tables, so skiboot expects the LPCR parameter to
	 * be zero.
	 */
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
			0 /* LPCR bits */);
	if (ret) {
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
		return ret;
	}

	dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
			nphb->opal_id, msr);
	ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
			pci_dev_id(gpdev));
	if (ret < 0)
		dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
	else
		ret = 0;

	/* Propagate any init-context failure to the caller */
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);

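/*
 * Map every GPU on the PE's bus to the given partition. The platform code
 * calls this with lparid 0 and MSR_DR | MSR_PR | MSR_HV to bind GPUs to
 * the host partition (compare pnv_npu_release_ownership() above, which
 * makes the same per-device call when VFIO hands a group back).
 */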
void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
{
	struct pci_dev *gpdev;

	list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
		pnv_npu2_map_lpar_dev(gpdev, 0, msr);
}

int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	if (hose->npu == NULL) {
		dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
		return 0;
	}

	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
			nphb->opal_id);
	ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
			pci_dev_id(gpdev));
	if (ret < 0) {
		dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
		return ret;
	}

	/* Set LPID to 0 anyway, just to be safe */
	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
			0 /* LPCR bits */);
	if (ret)
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev);