// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/debugfs.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>
#include <asm/xive.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"

#define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs	*/
#define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR	*/
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000

static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" };

static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
static void pnv_pci_configure_bus(struct pci_bus *bus);

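/*
 * Print a message prefixed with the PE's identity: the device name for
 * device PEs, the domain/bus number for bus PEs, or the VF's RID when
 * CONFIG_PCI_IOV is enabled. Used via the pe_err()/pe_warn()/pe_info()
 * wrappers.
 */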
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

static bool pnv_iommu_bypass_disabled __read_mostly;
static bool pci_reset_phbs __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

static int __init pci_reset_phbs_setup(char *str)
{
	pci_reset_phbs = true;
	return 0;
}

early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);

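/*
 * (Re)initialise the PE state in the PHB's PE array and ask OPAL to
 * clear any frozen state left behind by a previous user of this PE
 * number.
 */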
static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	s64 rc;

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;
	phb->ioda.pe_array[pe_no].dma_setup_done = false;

	/*
	 * Clear the PE frozen state as it might have been put into the
	 * frozen state by the last PCI remove path. It's not harmful to
	 * do so when the PE is already unfrozen.
	 */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);

	return &phb->ioda.pe_array[pe_no];
}

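/* Mark a specific PE number as in-use so the allocator never hands it out. */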
static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %x on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	mutex_lock(&phb->ioda.pe_alloc_mutex);
	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %x was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);
	mutex_unlock(&phb->ioda.pe_alloc_mutex);

	pnv_ioda_init_pe(phb, pe_no);
}

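/*
 * Allocate a run of @count consecutive, unused PE numbers, scanning the
 * bitmap from the highest PE number downwards. Returns the first PE of
 * the run, or NULL if no such run exists.
 */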
struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count)
{
	struct pnv_ioda_pe *ret = NULL;
	int run = 0, pe, i;

	mutex_lock(&phb->ioda.pe_alloc_mutex);

	/* scan backwards for a run of @count cleared bits */
	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
		if (test_bit(pe, phb->ioda.pe_alloc)) {
			run = 0;
			continue;
		}

		run++;
		if (run == count)
			break;
	}
	if (run != count)
		goto out;

	for (i = pe; i < pe + count; i++) {
		set_bit(i, phb->ioda.pe_alloc);
		pnv_ioda_init_pe(phb, i);
	}
	ret = &phb->ioda.pe_array[pe];

out:
	mutex_unlock(&phb->ioda.pe_alloc_mutex);
	return ret;
}

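/* Return a PE to the allocator, clearing its state first. */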
void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	unsigned int pe_num = pe->pe_number;

	WARN_ON(pe->pdev);
	memset(pe, 0, sizeof(struct pnv_ioda_pe));

	mutex_lock(&phb->ioda.pe_alloc_mutex);
	clear_bit(pe_num, phb->ioda.pe_alloc);
	mutex_unlock(&phb->ioda.pe_alloc_mutex);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/*
	 * Exclude the segments for the reserved PE and the root bus PE,
	 * which are the first or the last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		pr_warn(" Cannot strip M64 segment for reserved PE#%x\n",
			phb->ioda.reserved_pe_idx);

	return 0;

fail:
	pr_warn(" Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

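/*
 * Reserve the PE numbers backing the M64 segments that @pdev's M64 BARs
 * overlap: either by setting bits in @pe_bitmap, or directly in the
 * PHB's allocator when @pe_bitmap is NULL.
 */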
static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					unsigned long *pe_bitmap)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct resource *r;
	resource_size_t base, sgsz, start, end;
	int segno, i;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;

		start = ALIGN_DOWN(r->start - base, sgsz);
		end = ALIGN(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
			if (pe_bitmap)
				set_bit(segno, pe_bitmap);
			else
				pnv_ioda_reserve_pe(phb, segno);
		}
	}
}

static int pnv_ioda1_init_m64(struct pnv_phb *phb)
{
	struct resource *r;
	int index;

	/*
	 * There are 16 M64 BARs, each of which has 8 segments. So
	 * there are as many M64 segments as the maximum number of
	 * PEs, which is 128.
	 */
	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
		unsigned long base, segsz = phb->ioda.m64_segsize;
		int64_t rc;

		base = phb->ioda.m64_base +
		       index * PNV_IODA1_M64_SEGS * segsz;
		rc = opal_pci_set_phb_mem_window(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index, base, 0,
				PNV_IODA1_M64_SEGS * segsz);
		if (rc != OPAL_SUCCESS) {
			pr_warn(" Error %lld setting M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}

		rc = opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index,
				OPAL_ENABLE_M64_SPLIT);
		if (rc != OPAL_SUCCESS) {
			pr_warn(" Error %lld enabling M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}
	}

	for (index = 0; index < phb->ioda.total_pe_num; index++) {
		int64_t rc;

		/*
		 * P7IOC supports M64DT, which helps mapping an M64 segment
		 * to one particular PE#. However, PHB3 has a fixed mapping
		 * between M64 segment and PE#. In order to have the same
		 * logic for P7IOC and PHB3, we enforce the fixed mapping
		 * between M64 segment and PE# on P7IOC.
		 */
		rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				index, OPAL_M64_WINDOW_TYPE,
				index / PNV_IODA1_M64_SEGS,
				index % PNV_IODA1_M64_SEGS);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				index);
			goto fail;
		}
	}

	/*
	 * Exclude the segments for the reserved PE and the root bus PE,
	 * which are the first or the last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
		     phb->ioda.reserved_pe_idx, phb->hose->global_number);

	return 0;

fail:
	for ( ; index >= 0; index--)
		opal_pci_phb_mmio_enable(phb->opal_id,
			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);

	return -EIO;
}

static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
				    bool all)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,
						pe_bitmap, all);
	}
}

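/*
 * Pick the PE implied by the bus' M64 segments. If several segments are
 * covered, the lowest-numbered PE becomes the master and the others are
 * chained to it as slaves, forming a compound PE.
 */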
static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	int i;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return NULL;

	/* Allocate bitmap */
	size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return NULL;
	}

	/* Figure out reserved PE numbers by the PE */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own any M64 window: it might all be
	 * contributed by its child buses. In that case, there is no M64
	 * dependent PE# to pick.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		kfree(pe_alloc);
		return NULL;
	}

	/*
	 * Figure out the master PE and put all slave PEs on the master
	 * PE's list to form a compound PE.
	 */
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}
	}

	kfree(pe_alloc);
	return master_pe;
}

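/*
 * Parse the "ibm,opal-m64-window" property to size the PHB's M64 window
 * and its per-PE segments, and select the hardware BAR that will back
 * the default (shared) window.
 */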
static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	u32 m64_range[2], i;
	const __be32 *r;
	u64 pci_addr;

	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window not supported\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %pOF\n",
			dn);
		return;
	}

	/*
	 * Find the available M64 BAR range and pick the last BAR to
	 * cover the whole 64-bit space. We support only one range.
	 */
	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
				       m64_range, 2)) {
		/* In absence of the property, assume 0..15 */
		m64_range[0] = 0;
		m64_range[1] = 16;
	}
	/* We only support 64 bits in our allocator */
	if (m64_range[1] > 63) {
		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
			__func__, m64_range[1], phb->hose->global_number);
		m64_range[1] = 63;
	}
	/* Empty range, no m64 */
	if (m64_range[1] <= m64_range[0]) {
		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
			__func__, phb->hose->global_number);
		return;
	}

	/* Configure M64 information */
	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
		res->start, res->end, pci_addr, m64_range[0],
		m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
	phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR to cover M64 window */
	m64_range[1]--;
	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];

	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
	for (i = m64_range[0]; i < m64_range[1]; i++)
		clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version; IODA3 uses
	 * the IODA2 code.
	 */
	if (phb->type == PNV_PHB_IODA1)
		phb->init_m64 = pnv_ioda1_init_m64;
	else
		phb->init_m64 = pnv_ioda2_init_m64;
}

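/*
 * Put a PE into the EEH frozen state. For compound PEs the freeze is
 * applied to the master PE first and then to every slave.
 */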
static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

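/*
 * Clear the frozen state selected by @opt (an OPAL_EEH_ACTION_CLEAR_*
 * value) on a PE, resolving slave PEs to their master and covering all
 * slaves of a compound PE.
 */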
static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

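/*
 * Return the worst-case EEH freeze state across a PE and, for compound
 * PEs, all of its slaves; higher state values take priority.
 */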
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate = 0, state;
	__be16 pcierr = 0;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; note that the PE instance might not be
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn)
{
	int pe_number = phb->ioda.pe_rmap[bdfn];

	if (pe_number == IODA_INVALID_PE)
		return NULL;

	return &phb->ioda.pe_array[pe_number];
}

struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}

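/*
 * Add or remove a single parent/child pair in the PELT-V so that an
 * error on the parent PE also affects the child, including any slaves
 * the child owns as a compound PE master.
 */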
static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear the PE frozen state. If it's a master PE, we need to
	 * clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
							  slave->pe_number,
							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate the PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, an error
	 * originating from the PE might contribute to other
	 * PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}

static void pnv_ioda_unset_peltv(struct pnv_phb *phb,
				 struct pnv_ioda_pe *pe,
				 struct pci_dev *parent)
{
	int64_t rc;

	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number,
						OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
}

int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we only deconfigure VF PEs. Bus PEs will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = resource_size(&pe->pbus->busn_res);
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/*
	 * Release from all parents' PELT-V. NPUs don't have a PELTV
	 * table.
	 */
	if (phb->type != PNV_PHB_NPU_OCAPI)
		pnv_ioda_unset_peltv(phb, pe, parent);

	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
#ifdef CONFIG_PCI_IOV
	pe->parent_dev = NULL;
#endif

	return 0;
}

int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = resource_size(&pe->pbus->busn_res);
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, an error
	 * originating from the PE might contribute to other
	 * PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU_OCAPI)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

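/*
 * Allocate and configure a PE for a single PCI function, wiring up the
 * pci_dn reverse mapping and adding the new PE to the PHB's PE list.
 */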
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	pe = pnv_ioda_alloc_pe(phb, 1);
	if (!pe) {
		pr_warn("%s: Not enough PE# available, disabling device\n",
			pci_name(dev));
		return NULL;
	}

	/* NOTE: We don't get a reference for the pointer in the PE
	 * data structure, both the device and PE structures should be
	 * destroyed at the same time.
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pdn->pe_number = pe->pe_number;
	pe->flags = PNV_IODA_PE_DEV;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;
	pe->device_count++;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		return NULL;
	}

	/* Put PE to the list */
	mutex_lock(&phb->ioda.pe_list_mutex);
	list_add_tail(&pe->list, &phb->ioda.pe_list);
	mutex_unlock(&phb->ioda.pe_list_mutex);
	return pe;
}

/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus; the other contains a primary PCI bus plus its subordinate
 * PCI devices and buses. The second type is normally originated by a
 * PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	struct pnv_ioda_pe *pe = NULL;
	unsigned int pe_num;

	/*
	 * In the partial hotplug case, the PE instance might still be
	 * alive. We should reuse it instead of allocating a new one.
	 */
	pe_num = phb->ioda.pe_rmap[bus->number << 8];
	if (WARN_ON(pe_num != IODA_INVALID_PE)) {
		pe = &phb->ioda.pe_array[pe_num];
		return NULL;
	}

	/* PE number for root bus should have been reserved */
	if (pci_is_root_bus(bus))
		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if PE is determined by M64 */
	if (!pe)
		pe = pnv_ioda_pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (!pe)
		pe = pnv_ioda_alloc_pe(phb, 1);

	if (!pe) {
		pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);
		return NULL;
	}

	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

	if (all)
		pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n",
			&bus->busn_res.start, &bus->busn_res.end,
			pe->pe_number);
	else
		pe_info(pe, "Secondary bus %pad associated with PE#%x\n",
			&bus->busn_res.start, pe->pe_number);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pe->pbus = NULL;
		return NULL;
	}

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);

static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/* Check if the BDFN for this device is associated with a PE yet */
	pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
	if (!pe) {
		/* VF PEs should be pre-configured in pnv_pci_sriov_enable() */
		if (WARN_ON(pdev->is_virtfn))
			return;

		pnv_pci_configure_bus(pdev->bus);
		pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
		pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff);

		/*
		 * If we can't setup the IODA PE something has gone horribly
		 * wrong and we can't enable DMA for the device.
		 */
		if (WARN_ON(!pe))
			return;
	} else {
		pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number);
	}

	/*
	 * We assume that bridges *probably* don't need to do any DMA so we can
	 * skip allocating a TCE table, etc unless we get a non-bridge device.
	 */
	if (!pe->dma_setup_done && !pci_is_bridge(pdev)) {
		switch (phb->type) {
		case PNV_PHB_IODA1:
			pnv_pci_ioda1_setup_dma_pe(phb, pe);
			break;
		case PNV_PHB_IODA2:
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
			break;
		default:
			pr_warn("%s: No DMA for PHB#%x (type %d)\n",
				__func__, phb->hose->global_number, phb->type);
		}
	}

	if (pdn)
		pdn->pe_number = pe->pe_number;
	pe->device_count++;

	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);

	/* PEs with a DMA weight of zero won't have a group */
	if (pe->table_group.group)
		iommu_add_device(&pe->table_group, &pdev->dev);
}

/*
 * Reconfigure TVE#0 to be usable as 64-bit DMA space.
 *
 * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
 * Devices can only access more than that if bit 59 of the PCI address is set
 * by hardware, which indicates TVE#1 should be used instead of TVE#0.
 * Many PCI devices are not capable of addressing that many bits, and as a
 * result are limited to the 4GB of virtual memory made available to 32-bit
 * devices in TVE#0.
 *
 * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit
 * devices by configuring the virtual memory past the first 4GB inaccessible
 * by 64-bit DMAs. This should only be used by devices that want more than
 * 4GB, and only on PEs that have no 32-bit devices.
 *
 * Currently this will only work on PHB3 (POWER8).
 */
static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
{
	u64 window_size, table_size, tce_count, addr;
	struct page *table_pages;
	u64 tce_order = 28;		/* 256MB TCEs */
	__be64 *tces;
	s64 rc;

	/*
	 * Window size needs to be a power of two, but needs to account for
	 * shifting memory by the 4GB offset required to skip 32bit space.
	 */
	window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32));
	tce_count = window_size >> tce_order;
	table_size = tce_count << 3;

	if (table_size < PAGE_SIZE)
		table_size = PAGE_SIZE;

	table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
				       get_order(table_size));
	if (!table_pages)
		goto err;

	tces = page_address(table_pages);
	if (!tces)
		goto err;

	memset(tces, 0, table_size);

	for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) {
		tces[(addr + (1ULL << 32)) >> tce_order] =
			cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE);
	}

	rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
					pe->pe_number,
					/* reconfigure window 0 */
					(pe->pe_number << 1) + 0,
					1,
					__pa(tces),
					table_size,
					1 << tce_order);
	if (rc == OPAL_SUCCESS) {
		pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
		return 0;
	}
err:
	pe_err(pe, "Error configuring 64-bit DMA bypass\n");
	return -EIO;
}

static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
		u64 dma_mask)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return false;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		if (dma_mask >= top)
			return true;
	}

	/*
	 * If the device can't set the TCE bypass bit but still wants
	 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
	 * bypass the 32-bit region and be usable for 64-bit DMAs.
	 * The device needs to be able to address all of this space.
	 */
	if (dma_mask >> 32 &&
	    dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
	    /* pe->pdev should be set if it's a single device, pe->pbus if not */
	    (pe->device_count == 1 || !pe->pbus) &&
	    phb->model == PNV_PHB_MODEL_PHB3) {
		/* Configure the bypass mode */
		s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
		if (rc)
			return false;
		/* 4GB offset bypasses 32-bit space */
		pdev->dev.archdata.dma_offset = (1ULL << 32);
		return true;
	}

	return false;
}

static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
						     bool real_mode)
{
	return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
		(phb->regs + 0x210);
}

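/*
 * p7ioc-style TCE cache flush: write the physical addresses of the
 * affected TCE table entries to the PHB's invalidation register, two
 * TCEs per write.
 */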
static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl = list_first_entry_or_null(
			&tbl->it_group_list, struct iommu_table_group_link,
			next);
	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
			struct pnv_ioda_pe, table_group);
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
			npages - 1);

	/* p7ioc-style invalidation, 2 TCEs per write */
	start |= (1ull << 63);
	end |= (1ull << 63);
	inc = 16;
	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq_be(start, invalidate);
		else
			__raw_writeq_be(start, invalidate);

		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);

	return ret;
}

#ifdef CONFIG_IOMMU_API
/* Common for IODA1 and IODA2 */
static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction,
		bool realmode)
{
	return pnv_tce_xchg(tbl, index, hpa, direction, !realmode);
}
#endif

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
	.set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = pnv_ioda_tce_xchg_no_kill,
	.tce_kill = pnv_pci_p7ioc_tce_invalidate,
	.useraddrptr = pnv_tce_useraddrptr,
#endif
	.clear = pnv_ioda1_tce_free,
	.get = pnv_tce_get,
};

#define PHB3_TCE_KILL_INVAL_ALL		PPC_BIT(0)
#define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE		PPC_BIT(2)

static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	/* 01xb - invalidate TCEs that match the specified PE# */
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
	unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);

	mb(); /* Ensure above stores are visible */
	__raw_writeq_be(val, invalidate);
}

static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
					unsigned shift, unsigned long index,
					unsigned long npages)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	/* We'll invalidate DMA address in PE scope */
	start = PHB3_TCE_KILL_INVAL_ONE;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq_be(start, invalidate);
		else
			__raw_writeq_be(start, invalidate);
		start += inc;
	}
}

static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
		pnv_pci_phb3_tce_invalidate_pe(pe);
	else
		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
				  pe->pe_number, 0, 0, 0);
}

static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl;

	list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
				struct pnv_ioda_pe, table_group);
		struct pnv_phb *phb = pe->phb;
		unsigned int shift = tbl->it_page_shift;

		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
			pnv_pci_phb3_tce_invalidate(pe, rm, shift,
						    index, npages);
		else
			opal_pci_tce_kill(phb->opal_id,
					  OPAL_PCI_TCE_KILL_PAGES,
					  pe->pe_number, 1u << shift,
					  index << shift, npages);
	}
}

static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

	return ret;
}

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = pnv_ioda_tce_xchg_no_kill,
	.tce_kill = pnv_pci_ioda2_tce_invalidate,
	.useraddrptr = pnv_tce_useraddrptr,
#endif
	.clear = pnv_ioda2_tce_free,
	.get = pnv_tce_get,
	.free = pnv_pci_ioda2_table_free_pages,
};

static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
{
	unsigned int *weight = (unsigned int *)data;

	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		*weight += 3;
	else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		*weight += 15;
	else
		*weight += 10;

	return 0;
}

static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
{
	unsigned int weight = 0;

	/* SRIOV VF has same DMA32 weight as its PF */
#ifdef CONFIG_PCI_IOV
	if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
		pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
		return weight;
	}
#endif

	if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
		pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
		struct pci_dev *pdev;

		list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
			pnv_pci_ioda_dev_dma_weight(pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
		pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
	}

	return weight;
}

static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	struct iommu_table *tbl;
	unsigned int weight, total_weight = 0;
	unsigned int tce32_segsz, base, segs, avail, i;
	int64_t rc;
	void *addr;

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */
	weight = pnv_pci_ioda_pe_dma_weight(pe);
	if (!weight)
		return;

	pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
		     &total_weight);
	segs = (weight * phb->ioda.dma32_count) / total_weight;
	if (!segs)
		segs = 1;

	/*
	 * Allocate contiguous DMA32 segments. We begin with the expected
	 * number of segments. On each failed attempt, the number of DMA32
	 * segments to be allocated is decreased by one, until one segment
	 * is allocated successfully.
	 */
	do {
		for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
			for (avail = 0, i = base; i < base + segs; i++) {
				if (phb->ioda.dma32_segmap[i] ==
				    IODA_INVALID_PE)
					avail++;
			}

			if (avail == segs)
				goto found;
		}
	} while (--segs);

	if (!segs) {
		pe_warn(pe, "No available DMA32 segments\n");
		return;
	}

found:
	tbl = pnv_pci_table_alloc(phb->hose->node);
	if (WARN_ON(!tbl))
		return;

	iommu_register_group(&pe->table_group, phb->hose->global_number,
			pe->pe_number);
	pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);

	/* Grab a 32-bit TCE table */
	pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
		weight, total_weight, base, segs);
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		base * PNV_IODA1_DMA32_SEGSIZE,
		(base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 *
	 * Each TCE page is 4KB in size and each TCE entry occupies 8
	 * bytes
	 */
	tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce32_segsz * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce32_segsz * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
					      pe->pe_number,
					      base + i, 1,
					      __pa(addr) + tce32_segsz * i,
					      tce32_segsz, IOMMU_PAGE_SIZE_4K);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n",
			       rc);
			goto fail;
		}
	}

	/* Setup DMA32 segment mapping */
	for (i = base; i < base + segs; i++)
		phb->ioda.dma32_segmap[i] = pe->pe_number;

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
				  base * PNV_IODA1_DMA32_SEGSIZE,
				  IOMMU_PAGE_SHIFT_4K);

	tbl->it_ops = &pnv_ioda1_iommu_ops;
	pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
	pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
	tbl->it_index = (phb->hose->global_number << 16) | pe->pe_number;
	if (!iommu_init_table(tbl, phb->hose->node, 0, 0))
		panic("Failed to initialize iommu table");

	pe->dma_setup_done = true;
	return;
fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce32_segsz * segs));
	if (tbl) {
		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
		iommu_tce_table_put(tbl);
	}
}

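/*
 * Point one of the PE's DMA windows (TVEs) at @tbl and link the table
 * into the PE's table group.
 */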
pnv_pci_ioda2_set_window(struct iommu_table_group * table_group,int num,struct iommu_table * tbl)1637 static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
1638 int num, struct iommu_table *tbl)
1639 {
1640 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1641 table_group);
1642 struct pnv_phb *phb = pe->phb;
1643 int64_t rc;
1644 const unsigned long size = tbl->it_indirect_levels ?
1645 tbl->it_level_size : tbl->it_size;
1646 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
1647 const __u64 win_size = tbl->it_size << tbl->it_page_shift;
1648
1649 pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n",
1650 num, start_addr, start_addr + win_size - 1,
1651 IOMMU_PAGE_SIZE(tbl));
1652
1653 /*
1654 * Map TCE table through TVT. The TVE index is the PE number
1655 * shifted by 1 bit for 32-bits DMA space.
1656 */
1657 rc = opal_pci_map_pe_dma_window(phb->opal_id,
1658 pe->pe_number,
1659 (pe->pe_number << 1) + num,
1660 tbl->it_indirect_levels + 1,
1661 __pa(tbl->it_base),
1662 size << 3,
1663 IOMMU_PAGE_SIZE(tbl));
1664 if (rc) {
1665 pe_err(pe, "Failed to configure TCE table, err %lld\n", rc);
1666 return rc;
1667 }
1668
1669 pnv_pci_link_table_and_group(phb->hose->node, num,
1670 tbl, &pe->table_group);
1671 pnv_pci_ioda2_tce_invalidate_pe(pe);
1672
1673 return 0;
1674 }
1675
pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe * pe,bool enable)1676 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
1677 {
1678 uint16_t window_id = (pe->pe_number << 1 ) + 1;
1679 int64_t rc;
1680
1681 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
1682 if (enable) {
1683 phys_addr_t top = memblock_end_of_DRAM();
1684
1685 top = roundup_pow_of_two(top);
1686 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1687 pe->pe_number,
1688 window_id,
1689 pe->tce_bypass_base,
1690 top);
1691 } else {
1692 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1693 pe->pe_number,
1694 window_id,
1695 pe->tce_bypass_base,
1696 0);
1697 }
1698 if (rc)
1699 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
1700 else
1701 pe->tce_bypass_enabled = enable;
1702 }
1703
pnv_pci_ioda2_create_table(struct iommu_table_group * table_group,int num,__u32 page_shift,__u64 window_size,__u32 levels,bool alloc_userspace_copy,struct iommu_table ** ptbl)1704 static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
1705 int num, __u32 page_shift, __u64 window_size, __u32 levels,
1706 bool alloc_userspace_copy, struct iommu_table **ptbl)
1707 {
1708 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1709 table_group);
1710 int nid = pe->phb->hose->node;
1711 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
1712 long ret;
1713 struct iommu_table *tbl;
1714
1715 tbl = pnv_pci_table_alloc(nid);
1716 if (!tbl)
1717 return -ENOMEM;
1718
1719 tbl->it_ops = &pnv_ioda2_iommu_ops;
1720
1721 ret = pnv_pci_ioda2_table_alloc_pages(nid,
1722 bus_offset, page_shift, window_size,
1723 levels, alloc_userspace_copy, tbl);
1724 if (ret) {
1725 iommu_tce_table_put(tbl);
1726 return ret;
1727 }
1728
1729 *ptbl = tbl;
1730
1731 return 0;
1732 }
1733
pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe * pe)1734 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
1735 {
1736 struct iommu_table *tbl = NULL;
1737 long rc;
1738 unsigned long res_start, res_end;
1739
1740 /*
1741 * crashkernel= specifies the kdump kernel's maximum memory at
1742 * some offset and there is no guaranteed the result is a power
1743 * of 2, which will cause errors later.
1744 */
1745 const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
1746
1747 /*
1748 * In memory constrained environments, e.g. kdump kernel, the
1749 * DMA window can be larger than available memory, which will
1750 * cause errors later.
1751 */
1752 const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);
1753
1754 /*
1755 * We create the default window as big as we can. The constraint is
1756 * the max order of allocation possible. The TCE table is likely to
1757 * end up being multilevel and with on-demand allocation in place,
1758 * the initial use is not going to be huge as the default window aims
1759 * to support crippled devices (i.e. not fully 64bit DMAble) only.
1760 */
1761 /* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */
1762 const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory);
1763 /* Each TCE level cannot exceed maxblock so go multilevel if needed */
1764 unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT);
1765 unsigned long tcelevel_order = ilog2(maxblock >> 3);
1766 unsigned int levels = tces_order / tcelevel_order;
1767
1768 if (tces_order % tcelevel_order)
1769 levels += 1;
1770 /*
1771 * We try to stick to default levels (which is >1 at the moment) in
1772 * order to save memory by relying on on-demain TCE level allocation.
1773 */
1774 levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS);
1775
1776 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT,
1777 window_size, levels, false, &tbl);
1778 if (rc) {
1779 pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
1780 rc);
1781 return rc;
1782 }
1783
1784 /* We use top part of 32bit space for MMIO so exclude it from DMA */
1785 res_start = 0;
1786 res_end = 0;
1787 if (window_size > pe->phb->ioda.m32_pci_base) {
1788 res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
1789 res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
1790 }
1791
1792 tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number;
1793 if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
1794 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
1795 else
1796 rc = -ENOMEM;
1797 if (rc) {
1798 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc);
1799 iommu_tce_table_put(tbl);
1800 tbl = NULL; /* This clears iommu_table_base below */
1801 }
1802 if (!pnv_iommu_bypass_disabled)
1803 pnv_pci_ioda2_set_bypass(pe, true);
1804
1805 /*
1806 * Set table base for the case of IOMMU DMA use. Usually this is done
1807 * from dma_dev_setup() which is not called when a device is returned
1808 * from VFIO so do it here.
1809 */
1810 if (pe->pdev)
1811 set_iommu_table_base(&pe->pdev->dev, tbl);
1812
1813 return 0;
1814 }
1815
1816 static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
1817 int num)
1818 {
1819 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1820 table_group);
1821 struct pnv_phb *phb = pe->phb;
1822 long ret;
1823
1824 pe_info(pe, "Removing DMA window #%d\n", num);
1825
1826 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
1827 (pe->pe_number << 1) + num,
1828 0/* levels */, 0/* table address */,
1829 0/* table size */, 0/* page size */);
1830 if (ret)
1831 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
1832 else
1833 pnv_pci_ioda2_tce_invalidate_pe(pe);
1834
1835 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
1836
1837 return ret;
1838 }
1839
1840 #ifdef CONFIG_IOMMU_API
1841 unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
1842 __u64 window_size, __u32 levels)
1843 {
1844 unsigned long bytes = 0;
1845 const unsigned window_shift = ilog2(window_size);
1846 unsigned entries_shift = window_shift - page_shift;
1847 unsigned table_shift = entries_shift + 3;
1848 unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
1849 unsigned long direct_table_size;
1850
1851 if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
1852 !is_power_of_2(window_size))
1853 return 0;
1854
1855 /* Calculate a direct table size from window_size and levels */
1856 entries_shift = (entries_shift + levels - 1) / levels;
1857 table_shift = entries_shift + 3;
1858 table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
1859 direct_table_size = 1UL << table_shift;
1860
1861 for ( ; levels; --levels) {
1862 bytes += ALIGN(tce_table_size, direct_table_size);
1863
1864 tce_table_size /= direct_table_size;
1865 tce_table_size <<= 3;
1866 tce_table_size = max_t(unsigned long,
1867 tce_table_size, direct_table_size);
1868 }
1869
1870 return bytes + bytes; /* one for HW table, one for userspace copy */
1871 }
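/*
 * A minimal sketch of how a caller might size-check a window before
 * creating it. The function name and the parameter values (4K IOMMU
 * pages, 2GB window, 2 levels) are made up for illustration; the block
 * is deliberately not built.
 */
#if 0
static void pnv_pci_ioda2_table_size_example(void)
{
	unsigned long bytes = pnv_pci_ioda2_get_table_size(12, SZ_2G, 2);

	/* Covers both the HW tables and the userspace copy */
	pr_info("a 2GB/4K/2-level window needs %lu bytes of TCE tables\n",
		bytes);
}
#endif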
1872
1873 static long pnv_pci_ioda2_create_table_userspace(
1874 struct iommu_table_group *table_group,
1875 int num, __u32 page_shift, __u64 window_size, __u32 levels,
1876 struct iommu_table **ptbl)
1877 {
1878 long ret = pnv_pci_ioda2_create_table(table_group,
1879 num, page_shift, window_size, levels, true, ptbl);
1880
1881 if (!ret)
1882 (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
1883 page_shift, window_size, levels);
1884 return ret;
1885 }
1886
1887 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
1888 {
1889 struct pci_dev *dev;
1890
1891 list_for_each_entry(dev, &bus->devices, bus_list) {
1892 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
1893 dev->dev.archdata.dma_offset = pe->tce_bypass_base;
1894
1895 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1896 pnv_ioda_setup_bus_dma(pe, dev->subordinate);
1897 }
1898 }
1899
1900 static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
1901 {
1902 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1903 table_group);
1904 /* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
1905 struct iommu_table *tbl = pe->table_group.tables[0];
1906
1907 pnv_pci_ioda2_set_bypass(pe, false);
1908 pnv_pci_ioda2_unset_window(&pe->table_group, 0);
1909 if (pe->pbus)
1910 pnv_ioda_setup_bus_dma(pe, pe->pbus);
1911 else if (pe->pdev)
1912 set_iommu_table_base(&pe->pdev->dev, NULL);
1913 iommu_tce_table_put(tbl);
1914 }
1915
1916 static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
1917 {
1918 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1919 table_group);
1920
1921 pnv_pci_ioda2_setup_default_config(pe);
1922 if (pe->pbus)
1923 pnv_ioda_setup_bus_dma(pe, pe->pbus);
1924 }
1925
1926 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
1927 .get_table_size = pnv_pci_ioda2_get_table_size,
1928 .create_table = pnv_pci_ioda2_create_table_userspace,
1929 .set_window = pnv_pci_ioda2_set_window,
1930 .unset_window = pnv_pci_ioda2_unset_window,
1931 .take_ownership = pnv_ioda2_take_ownership,
1932 .release_ownership = pnv_ioda2_release_ownership,
1933 };
1934 #endif
1935
1936 void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1937 struct pnv_ioda_pe *pe)
1938 {
1939 int64_t rc;
1940
1941 /* TVE #1 is selected by PCI address bit 59 */
1942 pe->tce_bypass_base = 1ull << 59;
1943
1944 /* The PE will reserve all possible 32-bit space */
1945 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
1946 phb->ioda.m32_pci_base);
1947
1948 /* Setup linux iommu table */
1949 pe->table_group.tce32_start = 0;
1950 pe->table_group.tce32_size = phb->ioda.m32_pci_base;
1951 pe->table_group.max_dynamic_windows_supported =
1952 IOMMU_TABLE_GROUP_MAX_TABLES;
1953 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
1954 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
1955
1956 rc = pnv_pci_ioda2_setup_default_config(pe);
1957 if (rc)
1958 return;
1959
1960 #ifdef CONFIG_IOMMU_API
1961 pe->table_group.ops = &pnv_pci_ioda2_ops;
1962 iommu_register_group(&pe->table_group, phb->hose->global_number,
1963 pe->pe_number);
1964 #endif
1965 pe->dma_setup_done = true;
1966 }
1967
1968 /*
1969 * Called from KVM in real mode to EOI passthru interrupts. The ICP
1970 * EOI is handled directly in KVM in kvmppc_deliver_irq_passthru().
1971 *
1972 * The IRQ data is mapped in the PCI-MSI domain and the EOI OPAL call
1973 * needs an HW IRQ number mapped in the XICS IRQ domain. The HW IRQ
1974 * numbers of the in-the-middle MSI domain are vector numbers and it's
1975 * good enough for OPAL. Use that.
1976 */
1977 int64_t pnv_opal_pci_msi_eoi(struct irq_data *d)
1978 {
1979 struct pci_controller *hose = irq_data_get_irq_chip_data(d->parent_data);
1980 struct pnv_phb *phb = hose->private_data;
1981
1982 return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq);
1983 }
1984
1985 /*
1986 * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers
1987 */
1988 static void pnv_ioda2_msi_eoi(struct irq_data *d)
1989 {
1990 int64_t rc;
1991 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1992 struct pci_controller *hose = irq_data_get_irq_chip_data(d);
1993 struct pnv_phb *phb = hose->private_data;
1994
1995 rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
1996 WARN_ON_ONCE(rc);
1997
1998 icp_native_eoi(d);
1999 }
2000
2001 /* P8/CXL only */
2002 void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
2003 {
2004 struct irq_data *idata;
2005 struct irq_chip *ichip;
2006
2007 /* The MSI EOI OPAL call is only needed on PHB3 */
2008 if (phb->model != PNV_PHB_MODEL_PHB3)
2009 return;
2010
2011 if (!phb->ioda.irq_chip_init) {
2012 /*
2013 * The first time we set up an MSI IRQ, we need to set up the
2014 * corresponding IRQ chip so that EOIs are routed correctly.
2015 */
2016 idata = irq_get_irq_data(virq);
2017 ichip = irq_data_get_irq_chip(idata);
2018 phb->ioda.irq_chip_init = 1;
2019 phb->ioda.irq_chip = *ichip;
2020 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2021 }
2022 irq_set_chip(virq, &phb->ioda.irq_chip);
2023 irq_set_chip_data(virq, phb->hose);
2024 }
2025
2026 static struct irq_chip pnv_pci_msi_irq_chip;
2027
2028 /*
2029 * Returns true iff chip is something that we could call
2030 * pnv_opal_pci_msi_eoi for.
2031 */
2032 bool is_pnv_opal_msi(struct irq_chip *chip)
2033 {
2034 return chip == &pnv_pci_msi_irq_chip;
2035 }
2036 EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
2037
2038 static int __pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
2039 unsigned int xive_num,
2040 unsigned int is_64, struct msi_msg *msg)
2041 {
2042 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2043 __be32 data;
2044 int rc;
2045
2046 dev_dbg(&dev->dev, "%s: setup %s-bit MSI for vector #%d\n", __func__,
2047 is_64 ? "64" : "32", xive_num);
2048
2049 /* No PE assigned ? bail out ... no MSI for you ! */
2050 if (pe == NULL)
2051 return -ENXIO;
2052
2053 /* Check if we have an MVE */
2054 if (pe->mve_number < 0)
2055 return -ENXIO;
2056
2057 /* Force 32-bit MSI on some broken devices */
2058 if (dev->no_64bit_msi)
2059 is_64 = 0;
2060
2061 /* Assign XIVE to PE */
2062 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2063 if (rc) {
2064 pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2065 pci_name(dev), rc, xive_num);
2066 return -EIO;
2067 }
2068
2069 if (is_64) {
2070 __be64 addr64;
2071
2072 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2073 &addr64, &data);
2074 if (rc) {
2075 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2076 pci_name(dev), rc);
2077 return -EIO;
2078 }
2079 msg->address_hi = be64_to_cpu(addr64) >> 32;
2080 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
2081 } else {
2082 __be32 addr32;
2083
2084 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2085 &addr32, &data);
2086 if (rc) {
2087 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2088 pci_name(dev), rc);
2089 return -EIO;
2090 }
2091 msg->address_hi = 0;
2092 msg->address_lo = be32_to_cpu(addr32);
2093 }
2094 msg->data = be32_to_cpu(data);
2095
2096 return 0;
2097 }
2098
2099 /*
2100 * The msi_free() op is called before irq_domain_free_irqs_top() when
2101 * the handler data is still available. Use that to clear the XIVE
2102 * controller.
2103 */
2104 static void pnv_msi_ops_msi_free(struct irq_domain *domain,
2105 struct msi_domain_info *info,
2106 unsigned int irq)
2107 {
2108 if (xive_enabled())
2109 xive_irq_free_data(irq);
2110 }
2111
2112 static struct msi_domain_ops pnv_pci_msi_domain_ops = {
2113 .msi_free = pnv_msi_ops_msi_free,
2114 };
2115
2116 static void pnv_msi_shutdown(struct irq_data *d)
2117 {
2118 d = d->parent_data;
2119 if (d->chip->irq_shutdown)
2120 d->chip->irq_shutdown(d);
2121 }
2122
2123 static void pnv_msi_mask(struct irq_data *d)
2124 {
2125 pci_msi_mask_irq(d);
2126 irq_chip_mask_parent(d);
2127 }
2128
2129 static void pnv_msi_unmask(struct irq_data *d)
2130 {
2131 pci_msi_unmask_irq(d);
2132 irq_chip_unmask_parent(d);
2133 }
2134
2135 static struct irq_chip pnv_pci_msi_irq_chip = {
2136 .name = "PNV-PCI-MSI",
2137 .irq_shutdown = pnv_msi_shutdown,
2138 .irq_mask = pnv_msi_mask,
2139 .irq_unmask = pnv_msi_unmask,
2140 .irq_eoi = irq_chip_eoi_parent,
2141 };
2142
2143 static struct msi_domain_info pnv_msi_domain_info = {
2144 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
2145 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
2146 .ops = &pnv_pci_msi_domain_ops,
2147 .chip = &pnv_pci_msi_irq_chip,
2148 };
2149
2150 static void pnv_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
2151 {
2152 struct msi_desc *entry = irq_data_get_msi_desc(d);
2153 struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
2154 struct pci_controller *hose = irq_data_get_irq_chip_data(d);
2155 struct pnv_phb *phb = hose->private_data;
2156 int rc;
2157
2158 rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq,
2159 entry->msi_attrib.is_64, msg);
2160 if (rc)
2161 dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n",
2162 entry->msi_attrib.is_64 ? "64" : "32", d->hwirq, rc);
2163 }
2164
2165 /*
2166 * The IRQ data is mapped in the MSI domain in which HW IRQ numbers
2167 * correspond to vector numbers.
2168 */
2169 static void pnv_msi_eoi(struct irq_data *d)
2170 {
2171 struct pci_controller *hose = irq_data_get_irq_chip_data(d);
2172 struct pnv_phb *phb = hose->private_data;
2173
2174 if (phb->model == PNV_PHB_MODEL_PHB3) {
2175 /*
2176 * The EOI OPAL call takes an OPAL HW IRQ number but
2177 * since it is translated into a vector number in
2178 * OPAL, use that directly.
2179 */
2180 WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, d->hwirq));
2181 }
2182
2183 irq_chip_eoi_parent(d);
2184 }
2185
2186 static struct irq_chip pnv_msi_irq_chip = {
2187 .name = "PNV-MSI",
2188 .irq_shutdown = pnv_msi_shutdown,
2189 .irq_mask = irq_chip_mask_parent,
2190 .irq_unmask = irq_chip_unmask_parent,
2191 .irq_eoi = pnv_msi_eoi,
2192 .irq_set_affinity = irq_chip_set_affinity_parent,
2193 .irq_compose_msi_msg = pnv_msi_compose_msg,
2194 };
2195
2196 static int pnv_irq_parent_domain_alloc(struct irq_domain *domain,
2197 unsigned int virq, int hwirq)
2198 {
2199 struct irq_fwspec parent_fwspec;
2200 int ret;
2201
2202 parent_fwspec.fwnode = domain->parent->fwnode;
2203 parent_fwspec.param_count = 2;
2204 parent_fwspec.param[0] = hwirq;
2205 parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2206
2207 ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
2208 if (ret)
2209 return ret;
2210
2211 return 0;
2212 }
2213
2214 static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2215 unsigned int nr_irqs, void *arg)
2216 {
2217 struct pci_controller *hose = domain->host_data;
2218 struct pnv_phb *phb = hose->private_data;
2219 msi_alloc_info_t *info = arg;
2220 struct pci_dev *pdev = msi_desc_to_pci_dev(info->desc);
2221 int hwirq;
2222 int i, ret;
2223
2224 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, nr_irqs);
2225 if (hwirq < 0) {
2226 dev_warn(&pdev->dev, "failed to find a free MSI\n");
2227 return -ENOSPC;
2228 }
2229
2230 dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__,
2231 hose->dn, virq, hwirq, nr_irqs);
2232
2233 for (i = 0; i < nr_irqs; i++) {
2234 ret = pnv_irq_parent_domain_alloc(domain, virq + i,
2235 phb->msi_base + hwirq + i);
2236 if (ret)
2237 goto out;
2238
2239 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
2240 &pnv_msi_irq_chip, hose);
2241 }
2242
2243 return 0;
2244
2245 out:
2246 irq_domain_free_irqs_parent(domain, virq, i);
2247 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs);
2248 return ret;
2249 }
2250
2251 static void pnv_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2252 unsigned int nr_irqs)
2253 {
2254 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2255 struct pci_controller *hose = irq_data_get_irq_chip_data(d);
2256 struct pnv_phb *phb = hose->private_data;
2257
2258 pr_debug("%s bridge %pOF %d/%lx #%d\n", __func__, hose->dn,
2259 virq, d->hwirq, nr_irqs);
2260
2261 msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs);
2262 /* XIVE domain is cleared through ->msi_free() */
2263 }
2264
2265 static const struct irq_domain_ops pnv_irq_domain_ops = {
2266 .alloc = pnv_irq_domain_alloc,
2267 .free = pnv_irq_domain_free,
2268 };
2269
2270 static int pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count)
2271 {
2272 struct pnv_phb *phb = hose->private_data;
2273 struct irq_domain *parent = irq_get_default_host();
2274
2275 hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id);
2276 if (!hose->fwnode)
2277 return -ENOMEM;
2278
2279 hose->dev_domain = irq_domain_create_hierarchy(parent, 0, count,
2280 hose->fwnode,
2281 &pnv_irq_domain_ops, hose);
2282 if (!hose->dev_domain) {
2283 pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n",
2284 hose->dn, hose->global_number);
2285 irq_domain_free_fwnode(hose->fwnode);
2286 return -ENOMEM;
2287 }
2288
2289 hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn),
2290 &pnv_msi_domain_info,
2291 hose->dev_domain);
2292 if (!hose->msi_domain) {
2293 pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n",
2294 hose->dn, hose->global_number);
2295 irq_domain_free_fwnode(hose->fwnode);
2296 irq_domain_remove(hose->dev_domain);
2297 return -ENOMEM;
2298 }
2299
2300 return 0;
2301 }
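/*
 * Sketch of the resulting IRQ domain hierarchy (orientation only, drawn
 * from the code above rather than from any external documentation):
 *
 *   hose->msi_domain  ("PNV-PCI-MSI")  - per-device PCI/MSI layer
 *          |
 *   hose->dev_domain  ("PNV-MSI")      - PHB MSI vectors from msi_bmp
 *          |
 *   irq_get_default_host()             - XICS or XIVE controller domain
 */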
2302
2303 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2304 {
2305 unsigned int count;
2306 const __be32 *prop = of_get_property(phb->hose->dn,
2307 "ibm,opal-msi-ranges", NULL);
2308 if (!prop) {
2309 /* BML Fallback */
2310 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2311 }
2312 if (!prop)
2313 return;
2314
2315 phb->msi_base = be32_to_cpup(prop);
2316 count = be32_to_cpup(prop + 1);
2317 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
2318 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2319 phb->hose->global_number);
2320 return;
2321 }
2322
2323 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
2324 count, phb->msi_base);
2325
2326 pnv_msi_allocate_domains(phb->hose, count);
2327 }
2328
2329 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
2330 struct resource *res)
2331 {
2332 struct pnv_phb *phb = pe->phb;
2333 struct pci_bus_region region;
2334 int index;
2335 int64_t rc;
2336
2337 if (!res || !res->flags || res->start > res->end ||
2338 res->flags & IORESOURCE_UNSET)
2339 return;
2340
2341 if (res->flags & IORESOURCE_IO) {
2342 region.start = res->start - phb->ioda.io_pci_base;
2343 region.end = res->end - phb->ioda.io_pci_base;
2344 index = region.start / phb->ioda.io_segsize;
2345
2346 while (index < phb->ioda.total_pe_num &&
2347 region.start <= region.end) {
2348 phb->ioda.io_segmap[index] = pe->pe_number;
2349 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2350 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2351 if (rc != OPAL_SUCCESS) {
2352 pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
2353 __func__, rc, index, pe->pe_number);
2354 break;
2355 }
2356
2357 region.start += phb->ioda.io_segsize;
2358 index++;
2359 }
2360 } else if ((res->flags & IORESOURCE_MEM) &&
2361 !pnv_pci_is_m64(phb, res)) {
2362 region.start = res->start -
2363 phb->hose->mem_offset[0] -
2364 phb->ioda.m32_pci_base;
2365 region.end = res->end -
2366 phb->hose->mem_offset[0] -
2367 phb->ioda.m32_pci_base;
2368 index = region.start / phb->ioda.m32_segsize;
2369
2370 while (index < phb->ioda.total_pe_num &&
2371 region.start <= region.end) {
2372 phb->ioda.m32_segmap[index] = pe->pe_number;
2373 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2374 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
2375 if (rc != OPAL_SUCCESS) {
2376 pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
2377 __func__, rc, index, pe->pe_number);
2378 break;
2379 }
2380
2381 region.start += phb->ioda.m32_segsize;
2382 index++;
2383 }
2384 }
2385 }
2386
2387 /*
2388 * This function is supposed to be called on a per-PE basis, from
2389 * top to bottom, so the I/O or MMIO segment assigned to a
2390 * parent PE can be overridden by its child PEs if necessary.
2391 */
2392 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
2393 {
2394 struct pci_dev *pdev;
2395 int i;
2396
2397 /*
2398 * NOTE: We only care about PCI-bus-based PEs for now. PEs based
2399 * on PCI devices, for example SR-IOV VFs, should
2400 * be figured out later.
2401 */
2402 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
2403
2404 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
2405 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2406 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);
2407
2408 /*
2409 * If the PE contains all subordinate PCI buses, the
2410 * windows of the child bridges should be mapped to
2411 * the PE as well.
2412 */
2413 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
2414 continue;
2415 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
2416 pnv_ioda_setup_pe_res(pe,
2417 &pdev->resource[PCI_BRIDGE_RESOURCES + i]);
2418 }
2419 }
2420
2421 #ifdef CONFIG_DEBUG_FS
2422 static int pnv_pci_diag_data_set(void *data, u64 val)
2423 {
2424 struct pnv_phb *phb = data;
2425 s64 ret;
2426
2427 /* Retrieve the diag data from firmware */
2428 ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
2429 phb->diag_data_size);
2430 if (ret != OPAL_SUCCESS)
2431 return -EIO;
2432
2433 /* Print the diag data to the kernel log */
2434 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
2435 return 0;
2436 }
2437
2438 DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set,
2439 "%llu\n");
2440
2441 static int pnv_pci_ioda_pe_dump(void *data, u64 val)
2442 {
2443 struct pnv_phb *phb = data;
2444 int pe_num;
2445
2446 for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
2447 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num];
2448
2449 if (!test_bit(pe_num, phb->ioda.pe_alloc))
2450 continue;
2451
2452 pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n",
2453 pe->rid, pe->device_count,
2454 (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "",
2455 (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "",
2456 (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "",
2457 (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "",
2458 (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "",
2459 (pe->flags & PNV_IODA_PE_VF) ? "vf " : "");
2460 }
2461
2462 return 0;
2463 }
2464
2465 DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL,
2466 pnv_pci_ioda_pe_dump, "%llu\n");
2467
2468 #endif /* CONFIG_DEBUG_FS */
2469
2470 static void pnv_pci_ioda_create_dbgfs(void)
2471 {
2472 #ifdef CONFIG_DEBUG_FS
2473 struct pci_controller *hose, *tmp;
2474 struct pnv_phb *phb;
2475 char name[16];
2476
2477 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2478 phb = hose->private_data;
2479
2480 sprintf(name, "PCI%04x", hose->global_number);
2481 phb->dbgfs = debugfs_create_dir(name, arch_debugfs_dir);
2482
2483 debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
2484 phb, &pnv_pci_diag_data_fops);
2485 debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs,
2486 phb, &pnv_pci_ioda_pe_dump_fops);
2487 }
2488 #endif /* CONFIG_DEBUG_FS */
2489 }
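/*
 * Usage sketch for the files above, assuming debugfs is mounted at
 * /sys/kernel/debug, arch_debugfs_dir resolves to the "powerpc"
 * subdirectory there, and PHB 1 exists (all assumptions):
 *
 *   echo 1 > /sys/kernel/debug/powerpc/PCI0001/dump_diag_regs
 *   echo 1 > /sys/kernel/debug/powerpc/PCI0001/dump_ioda_pe_state
 *
 * The written value is ignored beyond triggering the dump; the output
 * goes to the kernel log.
 */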
2490
2491 static void pnv_pci_enable_bridge(struct pci_bus *bus)
2492 {
2493 struct pci_dev *dev = bus->self;
2494 struct pci_bus *child;
2495
2496 /* Empty bus ? bail */
2497 if (list_empty(&bus->devices))
2498 return;
2499
2500 /*
2501 * If there's a bridge associated with that bus, enable it. This works
2502 * around races in the generic code if the enabling is done during
2503 * parallel probing. This can be removed once those races have been
2504 * fixed.
2505 */
2506 if (dev) {
2507 int rc = pci_enable_device(dev);
2508 if (rc)
2509 pci_err(dev, "Error enabling bridge (%d)\n", rc);
2510 pci_set_master(dev);
2511 }
2512
2513 /* Do the same for child buses */
2514 list_for_each_entry(child, &bus->children, node)
2515 pnv_pci_enable_bridge(child);
2516 }
2517
2518 static void pnv_pci_enable_bridges(void)
2519 {
2520 struct pci_controller *hose;
2521
2522 list_for_each_entry(hose, &hose_list, list_node)
2523 pnv_pci_enable_bridge(hose->bus);
2524 }
2525
2526 static void pnv_pci_ioda_fixup(void)
2527 {
2528 pnv_pci_ioda_create_dbgfs();
2529
2530 pnv_pci_enable_bridges();
2531
2532 #ifdef CONFIG_EEH
2533 pnv_eeh_post_init();
2534 #endif
2535 }
2536
2537 /*
2538 * Returns the alignment for I/O or memory windows for P2P
2539 * bridges. That actually depends on how PEs are segmented.
2540 * For now, we return I/O or M32 segment size for PE sensitive
2541 * P2P bridges. Otherwise, the default values (4KiB for I/O,
2542 * 1MiB for memory) will be returned.
2543 *
2544 * The current PCI bus might be put into one PE, which was
2545 * created against the parent PCI bridge. In that case, we
2546 * needn't enlarge the alignment, which saves some
2547 * resources.
2548 */
2549 static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
2550 unsigned long type)
2551 {
2552 struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
2553 int num_pci_bridges = 0;
2554 struct pci_dev *bridge;
2555
2556 bridge = bus->self;
2557 while (bridge) {
2558 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
2559 num_pci_bridges++;
2560 if (num_pci_bridges >= 2)
2561 return 1;
2562 }
2563
2564 bridge = bridge->bus->self;
2565 }
2566
2567 /*
2568 * We fall back to M32 if M64 isn't supported. We enforce the M64
2569 * alignment for any 64-bit resource; PCIe doesn't care, and
2570 * bridges only do 64-bit prefetchable anyway.
2571 */
2572 if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
2573 return phb->ioda.m64_segsize;
2574 if (type & IORESOURCE_MEM)
2575 return phb->ioda.m32_segsize;
2576
2577 return phb->ioda.io_segsize;
2578 }
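/*
 * Illustrative numbers (assumed, not from real hardware): with an M64
 * segment size of 256MB, a 64-bit prefetchable bridge window is aligned
 * to 256MB; a 32-bit memory window falls back to the M32 segment size
 * (16MB for 4GB of M32 space split across 256 PEs); an I/O window gets
 * the I/O segment size.
 */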
2579
2580 /*
2581 * We are updating the root port, or the upstream port of the
2582 * bridge behind the root port, with the PHB's windows in order
2583 * to accommodate resource requirement changes during
2584 * PCI (slot) hotplug, which is connected to either the root
2585 * port or the downstream ports of a PCIe switch behind the
2586 * root port.
2587 */
2588 static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
2589 unsigned long type)
2590 {
2591 struct pci_controller *hose = pci_bus_to_host(bus);
2592 struct pnv_phb *phb = hose->private_data;
2593 struct pci_dev *bridge = bus->self;
2594 struct resource *r, *w;
2595 bool msi_region = false;
2596 int i;
2597
2598 /* Check if we need to apply a fixup to the bridge's windows */
2599 if (!pci_is_root_bus(bridge->bus) &&
2600 !pci_is_root_bus(bridge->bus->self->bus))
2601 return;
2602
2603 /* Fixup the resources */
2604 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
2605 r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
2606 if (!r->flags || !r->parent)
2607 continue;
2608
2609 w = NULL;
2610 if (r->flags & type & IORESOURCE_IO)
2611 w = &hose->io_resource;
2612 else if (pnv_pci_is_m64(phb, r) &&
2613 (type & IORESOURCE_PREFETCH) &&
2614 phb->ioda.m64_segsize)
2615 w = &hose->mem_resources[1];
2616 else if (r->flags & type & IORESOURCE_MEM) {
2617 w = &hose->mem_resources[0];
2618 msi_region = true;
2619 }
2620
2621 r->start = w->start;
2622 r->end = w->end;
2623
2624 /* The 64KB 32-bit MSI region shouldn't be included in
2625 * the 32-bit bridge window. Otherwise, we can see strange
2626 * issues such as the EEH error observed on Garrison.
2627 *
2628 * Exclude the top 1MB region, which is the minimal alignment of
2629 * the 32-bit bridge window.
2630 */
2631 if (msi_region) {
2632 r->end += 0x10000;
2633 r->end -= 0x100000;
2634 }
2635 }
2636 }
2637
2638 static void pnv_pci_configure_bus(struct pci_bus *bus)
2639 {
2640 struct pci_dev *bridge = bus->self;
2641 struct pnv_ioda_pe *pe;
2642 bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
2643
2644 dev_info(&bus->dev, "Configuring PE for bus\n");
2645
2646 /* Don't assign a PE to a PCI bus that has no subordinate devices */
2647 if (WARN_ON(list_empty(&bus->devices)))
2648 return;
2649
2650 /* Reserve PEs according to used M64 resources */
2651 pnv_ioda_reserve_m64_pe(bus, NULL, all);
2652
2653 /*
2654 * Assign PE. We might run here because of partial hotplug.
2655 * In that case, we just pick up the existing PE and should
2656 * not allocate resources again.
2657 */
2658 pe = pnv_ioda_setup_bus_PE(bus, all);
2659 if (!pe)
2660 return;
2661
2662 pnv_ioda_setup_pe_seg(pe);
2663 }
2664
2665 static resource_size_t pnv_pci_default_alignment(void)
2666 {
2667 return PAGE_SIZE;
2668 }
2669
2670 /* Prevent enabling devices for which we couldn't properly
2671 * assign a PE
2672 */
2673 static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
2674 {
2675 struct pci_dn *pdn;
2676
2677 pdn = pci_get_pdn(dev);
2678 if (!pdn || pdn->pe_number == IODA_INVALID_PE) {
2679 pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n");
2680 return false;
2681 }
2682
2683 return true;
2684 }
2685
2686 static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
2687 {
2688 struct pci_dn *pdn;
2689 struct pnv_ioda_pe *pe;
2690
2691 pdn = pci_get_pdn(dev);
2692 if (!pdn)
2693 return false;
2694
2695 if (pdn->pe_number == IODA_INVALID_PE) {
2696 pe = pnv_ioda_setup_dev_PE(dev);
2697 if (!pe)
2698 return false;
2699 }
2700 return true;
2701 }
2702
2703 static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
2704 int num)
2705 {
2706 struct pnv_ioda_pe *pe = container_of(table_group,
2707 struct pnv_ioda_pe, table_group);
2708 struct pnv_phb *phb = pe->phb;
2709 unsigned int idx;
2710 long rc;
2711
2712 pe_info(pe, "Removing DMA window #%d\n", num);
2713 for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
2714 if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
2715 continue;
2716
2717 rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2718 idx, 0, 0ul, 0ul, 0ul);
2719 if (rc != OPAL_SUCCESS) {
2720 pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
2721 rc, idx);
2722 return rc;
2723 }
2724
2725 phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
2726 }
2727
2728 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
2729 return OPAL_SUCCESS;
2730 }
2731
2732 static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
2733 {
2734 struct iommu_table *tbl = pe->table_group.tables[0];
2735 int64_t rc;
2736
2737 if (!pe->dma_setup_done)
2738 return;
2739
2740 rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
2741 if (rc != OPAL_SUCCESS)
2742 return;
2743
2744 pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
2745 if (pe->table_group.group) {
2746 iommu_group_put(pe->table_group.group);
2747 WARN_ON(pe->table_group.group);
2748 }
2749
2750 free_pages(tbl->it_base, get_order(tbl->it_size << 3));
2751 iommu_tce_table_put(tbl);
2752 }
2753
2754 void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
2755 {
2756 struct iommu_table *tbl = pe->table_group.tables[0];
2757 int64_t rc;
2758
2759 if (!pe->dma_setup_done)
2760 return;
2761
2762 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
2763 if (rc)
2764 pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
2765
2766 pnv_pci_ioda2_set_bypass(pe, false);
2767 if (pe->table_group.group) {
2768 iommu_group_put(pe->table_group.group);
2769 WARN_ON(pe->table_group.group);
2770 }
2771
2772 iommu_tce_table_put(tbl);
2773 }
2774
2775 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
2776 unsigned short win,
2777 unsigned int *map)
2778 {
2779 struct pnv_phb *phb = pe->phb;
2780 int idx;
2781 int64_t rc;
2782
2783 for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
2784 if (map[idx] != pe->pe_number)
2785 continue;
2786
2787 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2788 phb->ioda.reserved_pe_idx, win, 0, idx);
2789
2790 if (rc != OPAL_SUCCESS)
2791 pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
2792 rc, win, idx);
2793
2794 map[idx] = IODA_INVALID_PE;
2795 }
2796 }
2797
2798 static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
2799 {
2800 struct pnv_phb *phb = pe->phb;
2801
2802 if (phb->type == PNV_PHB_IODA1) {
2803 pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
2804 phb->ioda.io_segmap);
2805 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
2806 phb->ioda.m32_segmap);
2807 /* M64 is pre-configured by pnv_ioda1_init_m64() */
2808 } else if (phb->type == PNV_PHB_IODA2) {
2809 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
2810 phb->ioda.m32_segmap);
2811 }
2812 }
2813
2814 static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
2815 {
2816 struct pnv_phb *phb = pe->phb;
2817 struct pnv_ioda_pe *slave, *tmp;
2818
2819 pe_info(pe, "Releasing PE\n");
2820
2821 mutex_lock(&phb->ioda.pe_list_mutex);
2822 list_del(&pe->list);
2823 mutex_unlock(&phb->ioda.pe_list_mutex);
2824
2825 switch (phb->type) {
2826 case PNV_PHB_IODA1:
2827 pnv_pci_ioda1_release_pe_dma(pe);
2828 break;
2829 case PNV_PHB_IODA2:
2830 pnv_pci_ioda2_release_pe_dma(pe);
2831 break;
2832 case PNV_PHB_NPU_OCAPI:
2833 break;
2834 default:
2835 WARN_ON(1);
2836 }
2837
2838 pnv_ioda_release_pe_seg(pe);
2839 pnv_ioda_deconfigure_pe(pe->phb, pe);
2840
2841 /* Release slave PEs in the compound PE */
2842 if (pe->flags & PNV_IODA_PE_MASTER) {
2843 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
2844 list_del(&slave->list);
2845 pnv_ioda_free_pe(slave);
2846 }
2847 }
2848
2849 /*
2850 * The PE for the root bus can be removed because of hotplug in EEH
2851 * recovery for a fenced PHB error. We need to mark the PE dead so
2852 * that it can be populated again in the PCI hot add path. The PE
2853 * shouldn't be destroyed as it's a global reserved resource.
2854 */
2855 if (phb->ioda.root_pe_idx == pe->pe_number)
2856 return;
2857
2858 pnv_ioda_free_pe(pe);
2859 }
2860
2861 static void pnv_pci_release_device(struct pci_dev *pdev)
2862 {
2863 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
2864 struct pci_dn *pdn = pci_get_pdn(pdev);
2865 struct pnv_ioda_pe *pe;
2866
2867 /* The VF PE state is torn down when sriov_disable() is called */
2868 if (pdev->is_virtfn)
2869 return;
2870
2871 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
2872 return;
2873
2874 #ifdef CONFIG_PCI_IOV
2875 /*
2876 * FIXME: Try to move this to sriov_disable(). It's here because we
2877 * allocate the IOV state at probe time, since we need to fiddle
2878 * with the IOV resources.
2879 */
2880 if (pdev->is_physfn)
2881 kfree(pdev->dev.archdata.iov_data);
2882 #endif
2883
2884 /*
2885 * PCI hotplug can happen as part of EEH error recovery. The @pdn
2886 * isn't removed and added afterwards in this scenario. We should
2887 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
2888 * device count would be decreased on removing devices while failing
2889 * to be increased on adding devices, leading to an unbalanced
2890 * device count that eventually breaks the normal PCI hotplug path.
2891 */
2892 pe = &phb->ioda.pe_array[pdn->pe_number];
2893 pdn->pe_number = IODA_INVALID_PE;
2894
2895 WARN_ON(--pe->device_count < 0);
2896 if (pe->device_count == 0)
2897 pnv_ioda_release_pe(pe);
2898 }
2899
2900 static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
2901 {
2902 struct pnv_phb *phb = hose->private_data;
2903
2904 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
2905 OPAL_ASSERT_RESET);
2906 }
2907
2908 static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus)
2909 {
2910 struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
2911 struct pnv_ioda_pe *pe;
2912
2913 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2914 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
2915 continue;
2916
2917 if (!pe->pbus)
2918 continue;
2919
2920 if (bus->number == ((pe->rid >> 8) & 0xFF)) {
2921 pe->pbus = bus;
2922 break;
2923 }
2924 }
2925 }
2926
2927 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
2928 .dma_dev_setup = pnv_pci_ioda_dma_dev_setup,
2929 .dma_bus_setup = pnv_pci_ioda_dma_bus_setup,
2930 .iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported,
2931 .enable_device_hook = pnv_pci_enable_device_hook,
2932 .release_device = pnv_pci_release_device,
2933 .window_alignment = pnv_pci_window_alignment,
2934 .setup_bridge = pnv_pci_fixup_bridge_resources,
2935 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
2936 .shutdown = pnv_pci_ioda_shutdown,
2937 };
2938
2939 static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
2940 .enable_device_hook = pnv_ocapi_enable_device_hook,
2941 .release_device = pnv_pci_release_device,
2942 .window_alignment = pnv_pci_window_alignment,
2943 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
2944 .shutdown = pnv_pci_ioda_shutdown,
2945 };
2946
2947 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2948 u64 hub_id, int ioda_type)
2949 {
2950 struct pci_controller *hose;
2951 struct pnv_phb *phb;
2952 unsigned long size, m64map_off, m32map_off, pemap_off;
2953 unsigned long iomap_off = 0, dma32map_off = 0;
2954 struct pnv_ioda_pe *root_pe;
2955 struct resource r;
2956 const __be64 *prop64;
2957 const __be32 *prop32;
2958 int len;
2959 unsigned int segno;
2960 u64 phb_id;
2961 void *aux;
2962 long rc;
2963
2964 if (!of_device_is_available(np))
2965 return;
2966
2967 pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np);
2968
2969 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
2970 if (!prop64) {
2971 pr_err(" Missing \"ibm,opal-phbid\" property !\n");
2972 return;
2973 }
2974 phb_id = be64_to_cpup(prop64);
2975 pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
2976
2977 phb = kzalloc(sizeof(*phb), GFP_KERNEL);
2978 if (!phb)
2979 panic("%s: Failed to allocate %zu bytes\n", __func__,
2980 sizeof(*phb));
2981
2982 /* Allocate PCI controller */
2983 phb->hose = hose = pcibios_alloc_controller(np);
2984 if (!phb->hose) {
2985 pr_err(" Can't allocate PCI controller for %pOF\n",
2986 np);
2987 kfree(phb); /* allocated with kzalloc() above */
2988 return;
2989 }
2990
2991 spin_lock_init(&phb->lock);
2992 prop32 = of_get_property(np, "bus-range", &len);
2993 if (prop32 && len == 8) {
2994 hose->first_busno = be32_to_cpu(prop32[0]);
2995 hose->last_busno = be32_to_cpu(prop32[1]);
2996 } else {
2997 pr_warn(" Broken <bus-range> on %pOF\n", np);
2998 hose->first_busno = 0;
2999 hose->last_busno = 0xff;
3000 }
3001 hose->private_data = phb;
3002 phb->hub_id = hub_id;
3003 phb->opal_id = phb_id;
3004 phb->type = ioda_type;
3005 mutex_init(&phb->ioda.pe_alloc_mutex);
3006
3007 /* Detect specific models for error handling */
3008 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
3009 phb->model = PNV_PHB_MODEL_P7IOC;
3010 else if (of_device_is_compatible(np, "ibm,power8-pciex"))
3011 phb->model = PNV_PHB_MODEL_PHB3;
3012 else
3013 phb->model = PNV_PHB_MODEL_UNKNOWN;
3014
3015 /* Initialize diagnostic data buffer */
3016 prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL);
3017 if (prop32)
3018 phb->diag_data_size = be32_to_cpup(prop32);
3019 else
3020 phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
3021
3022 phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL);
3023 if (!phb->diag_data)
3024 panic("%s: Failed to allocate %u bytes\n", __func__,
3025 phb->diag_data_size);
3026
3027 /* Parse 32-bit and IO ranges (if any) */
3028 pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
3029
3030 /* Get registers */
3031 if (!of_address_to_resource(np, 0, &r)) {
3032 phb->regs_phys = r.start;
3033 phb->regs = ioremap(r.start, resource_size(&r));
3034 if (phb->regs == NULL)
3035 pr_err(" Failed to map registers !\n");
3036 }
3037
3038 /* Initialize more IODA stuff */
3039 phb->ioda.total_pe_num = 1;
3040 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
3041 if (prop32)
3042 phb->ioda.total_pe_num = be32_to_cpup(prop32);
3043 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
3044 if (prop32)
3045 phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
3046
3047 /* Invalidate RID to PE# mapping */
3048 for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
3049 phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;
3050
3051 /* Parse 64-bit MMIO range */
3052 pnv_ioda_parse_m64_window(phb);
3053
3054 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
3055 /* FW has already carved off the top 64K of M32 space (MSI space) */
3056 phb->ioda.m32_size += 0x10000;
3057
3058 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
3059 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
3060 phb->ioda.io_size = hose->pci_io_size;
3061 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
3062 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
3063
3064 /* Calculate how many 32-bit TCE segments we have */
3065 phb->ioda.dma32_count = phb->ioda.m32_pci_base /
3066 PNV_IODA1_DMA32_SEGSIZE;
3067
3068 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
3069 size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
3070 sizeof(unsigned long));
3071 m64map_off = size;
3072 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
3073 m32map_off = size;
3074 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
3075 if (phb->type == PNV_PHB_IODA1) {
3076 iomap_off = size;
3077 size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
3078 dma32map_off = size;
3079 size += phb->ioda.dma32_count *
3080 sizeof(phb->ioda.dma32_segmap[0]);
3081 }
3082 pemap_off = size;
3083 size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
3084 aux = kzalloc(size, GFP_KERNEL);
3085 if (!aux)
3086 panic("%s: Failed to allocate %lu bytes\n", __func__, size);
3087
3088 phb->ioda.pe_alloc = aux;
3089 phb->ioda.m64_segmap = aux + m64map_off;
3090 phb->ioda.m32_segmap = aux + m32map_off;
3091 for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
3092 phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
3093 phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
3094 }
3095 if (phb->type == PNV_PHB_IODA1) {
3096 phb->ioda.io_segmap = aux + iomap_off;
3097 for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
3098 phb->ioda.io_segmap[segno] = IODA_INVALID_PE;
3099
3100 phb->ioda.dma32_segmap = aux + dma32map_off;
3101 for (segno = 0; segno < phb->ioda.dma32_count; segno++)
3102 phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
3103 }
3104 phb->ioda.pe_array = aux + pemap_off;
3105
3106 /*
3107 * Choose a PE number for the root bus, which shouldn't have
3108 * M64 resources consumed by its child devices. Pick
3109 * the PE number adjacent to the reserved one if possible.
3110 */
3111 pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
3112 if (phb->ioda.reserved_pe_idx == 0) {
3113 phb->ioda.root_pe_idx = 1;
3114 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3115 } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
3116 phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
3117 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3118 } else {
3119 /* otherwise just allocate one */
3120 root_pe = pnv_ioda_alloc_pe(phb, 1);
3121 phb->ioda.root_pe_idx = root_pe->pe_number;
3122 }
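	/*
	 * For example (values assumed for illustration): with 256 PEs and
	 * reserved_pe_idx == 255 the root bus gets PE#254, while with
	 * reserved_pe_idx == 0 it gets PE#1.
	 */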
3123
3124 INIT_LIST_HEAD(&phb->ioda.pe_list);
3125 mutex_init(&phb->ioda.pe_list_mutex);
3130
3131 #if 0 /* We should really do that ... */
3132 rc = opal_pci_set_phb_mem_window(opal->phb_id,
3133 window_type,
3134 window_num,
3135 starting_real_address,
3136 starting_pci_address,
3137 segment_size);
3138 #endif
3139
3140 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
3141 phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
3142 phb->ioda.m32_size, phb->ioda.m32_segsize);
3143 if (phb->ioda.m64_size)
3144 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
3145 phb->ioda.m64_size, phb->ioda.m64_segsize);
3146 if (phb->ioda.io_size)
3147 pr_info(" IO: 0x%x [segment=0x%x]\n",
3148 phb->ioda.io_size, phb->ioda.io_segsize);
3149
3150
3151 phb->hose->ops = &pnv_pci_ops;
3152 phb->get_pe_state = pnv_ioda_get_pe_state;
3153 phb->freeze_pe = pnv_ioda_freeze_pe;
3154 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
3155
3156 /* Setup MSI support */
3157 pnv_pci_init_ioda_msis(phb);
3158
3159 /*
3160 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
3161 * to let the PCI core do resource assignment. It's expected
3162 * that the PCI core will do correct I/O and MMIO alignment
3163 * for the P2P bridge BARs so that each PCI bus (excluding
3164 * the child P2P bridges) can form an individual PE.
3165 */
3166 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
3167
3168 switch (phb->type) {
3169 case PNV_PHB_NPU_OCAPI:
3170 hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
3171 break;
3172 default:
3173 hose->controller_ops = pnv_pci_ioda_controller_ops;
3174 }
3175
3176 ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
3177
3178 #ifdef CONFIG_PCI_IOV
3179 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
3180 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
3181 ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
3182 ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
3183 #endif
3184
3185 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
3186
3187 /* Reset IODA tables to a clean state */
3188 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
3189 if (rc)
3190 pr_warn(" OPAL Error %ld performing IODA table reset !\n", rc);
3191
3192 /*
3193 * If we're running in the kdump kernel, the previous kernel never
3194 * shut down PCI devices correctly. We already have the IODA table
3195 * cleaned out, so we have to issue a PHB reset to stop all PCI
3196 * transactions from the previous kernel. The ppc_pci_reset_phbs
3197 * kernel parameter will force this reset too. Additionally,
3198 * if the IODA reset above failed then use a bigger hammer.
3199 * This can happen if we get a PHB fatal error in very early
3200 * boot.
3201 */
3202 if (is_kdump_kernel() || pci_reset_phbs || rc) {
3203 pr_info(" Issue PHB reset ...\n");
3204 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
3205 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
3206 }
3207
3208 /* Remove M64 resource if we can't configure it successfully */
3209 if (!phb->init_m64 || phb->init_m64(phb))
3210 hose->mem_resources[1].flags = 0;
3211
3212 /* create pci_dn's for DT nodes under this PHB */
3213 pci_devs_phb_init_dynamic(hose);
3214 }
3215
3216 void __init pnv_pci_init_ioda2_phb(struct device_node *np)
3217 {
3218 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
3219 }
3220
3221 void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
3222 {
3223 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
3224 }
3225
3226 static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev)
3227 {
3228 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
3229
3230 if (!machine_is(powernv))
3231 return;
3232
3233 if (phb->type == PNV_PHB_NPU_OCAPI)
3234 dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
3235 }
3236 DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup);
3237
3238 void __init pnv_pci_init_ioda_hub(struct device_node *np)
3239 {
3240 struct device_node *phbn;
3241 const __be64 *prop64;
3242 u64 hub_id;
3243
3244 pr_info("Probing IODA IO-Hub %pOF\n", np);
3245
3246 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
3247 if (!prop64) {
3248 pr_err(" Missing \"ibm,opal-hubid\" property !\n");
3249 return;
3250 }
3251 hub_id = be64_to_cpup(prop64);
3252 pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
3253
3254 /* Count child PHBs */
3255 for_each_child_of_node(np, phbn) {
3256 /* Look for IODA1 PHBs */
3257 if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
3258 pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
3259 }
3260 }
3261