// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
		 MSI_FLAG_MULTI_PCI_MSI,
	.chip = &dw_pcie_msi_irq_chip,
};

/* MSI interrupt handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dw_handle_msi_irq);
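
/*
 * Illustrative sketch (not part of the driver logic): each MSI controller
 * block in the iMSI-RX module serves MAX_MSI_IRQS_PER_CTRL (32) vectors,
 * and its registers sit MSI_REG_CTRL_BLOCK_SIZE apart. A hwirq therefore
 * decomposes as, for example, hwirq 37:
 *
 *	ctrl = 37 / MAX_MSI_IRQS_PER_CTRL;	// controller 1
 *	bit  = 37 % MAX_MSI_IRQS_PER_CTRL;	// bit 5 of that controller
 *	res  = ctrl * MSI_REG_CTRL_BLOCK_SIZE;	// register block offset
 *
 * The mask/unmask/ack helpers below all rely on this same decomposition.
 */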

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
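
/*
 * Message flow sketch (illustrative): for an endpoint assigned hwirq 37,
 * dw_pci_setup_msi_msg() composes:
 *
 *	msg->address = pp->msi_data;	// the 32-bit-capable target address
 *	msg->data    = 37;
 *
 * The endpoint's MSI write of 37 to that address sets bit 5 of controller
 * 1's PCIE_MSI_INTR0_STATUS register, which dw_handle_msi_irq() then
 * demuxes back to hwirq 37 via generic_handle_domain_irq().
 */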

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
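
/*
 * Hierarchy sketch (illustrative only): dw_pcie_allocate_domains() stacks a
 * PCI/MSI domain on top of the driver's linear "nexus" domain:
 *
 *	pci_msi_create_irq_domain()	-> dw_pcie_msi_irq_chip (masks both
 *					   the PCI device and the parent)
 *	irq_domain_create_linear()	-> dw_pci_msi_bottom_irq_chip
 *					   (programs the iMSI-RX registers)
 *
 * A device's MSI descriptor thus resolves through both levels before
 * reaching the hardware vector picked out of pp->msi_irq_in_use.
 */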

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If there are no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}
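
/*
 * Hypothetical devicetree fragment (for illustration only) that would be
 * matched by dw_pcie_parse_split_msi_irq() above, with one IRQ per MSI
 * controller instead of a single shared "msi" IRQ:
 *
 *	interrupt-names = "msi0", "msi1", "msi2";
 *	interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
 *		     <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
 *		     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 *
 * Three controllers would cap num_vectors at 3 * MAX_MSI_IRQS_PER_CTRL = 96.
 */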

static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr = NULL;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX module supports 64-bit addresses, some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices, the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note: until a better alternative is found, the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (!ret)
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);

	if (!msi_vaddr) {
		dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);
		if (!msi_vaddr) {
			dev_err(dev, "Failed to allocate MSI address\n");
			dw_pcie_free_msi(pp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *win;
	struct resource *res;

	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (win) {
		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
		if (!res)
			return;

		/*
		 * Allocate an MSG TLP region of size 'region_align' at the
		 * end of the host bridge window.
		 */
		res->start = win->res->end - pci->region_align + 1;
		res->end = win->res->end;
		res->name = "msg";
		res->flags = win->res->flags | IORESOURCE_BUSY;

		if (!devm_request_resource(pci->dev, win->res, res))
			pp->msg_res = res;
	}
}
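
/*
 * Worked example (hypothetical numbers): with a bridge MEM window of
 * 0xa0000000..0xafffffff and a region_align of 0x10000, the reserved MSG
 * TLP region becomes:
 *
 *	res->start = 0xafffffff - 0x10000 + 1 = 0xafff0000;
 *	res->end   = 0xafffffff;
 *
 * dw_pcie_iatu_setup() later shrinks the MEM iATU window by that amount so
 * the carved-out range only ever carries Msg TLPs.
 */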

int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->init) {
		ret = pp->ops->init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_init) {
			ret = pp->ops->msi_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	/*
	 * Allocate the resource for MSG TLP before programming the iATU
	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
	 * on the value of 'region_align', this has to be done after
	 * dw_pcie_iatu_detect().
	 *
	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
	 * make use of the generic MSG TLP implementation.
	 */
	if (pp->use_atu_msg)
		dw_pcie_host_request_msg_tlp_res(pp);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/*
	 * Note: Skip the link up delay only when a Link Up IRQ is present.
	 * If there is no Link Up IRQ, we should not bypass the delay
	 * because that would require users to manually rescan for devices.
	 */
	if (!pp->use_linkup_irq)
		/* Ignore errors, the link may come up later */
		dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	if (pp->ops->post_init)
		pp->ops->post_init(pp);

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->deinit)
		pp->ops->deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->deinit)
		pp->ops->deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
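
/*
 * Minimal glue-driver sketch (illustrative only; the foo_* names are
 * hypothetical, not part of this file). A platform driver embeds struct
 * dw_pcie, fills in the callbacks it needs, and hands the root port to
 * dw_pcie_host_init():
 *
 *	static int foo_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct dw_pcie *pci;
 *
 *		pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
 *		if (!pci)
 *			return -ENOMEM;
 *
 *		pci->dev = &pdev->dev;
 *		pci->pp.ops = &foo_pcie_host_ops;	// optional callbacks
 *
 *		return dw_pcie_host_init(&pci->pp);
 *	}
 *
 * dw_pcie_host_deinit() is the matching teardown for the remove path.
 */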

static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	atu.type = type;
	atu.cpu_addr = pp->cfg0_base;
	atu.pci_addr = busdev;
	atu.size = pp->cfg0_size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	/* Restore the shared iATU window to IO after the CFG access */
	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	/* Restore the shared iATU window to IO after the CFG access */
	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
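
/*
 * Config-access routing sketch (illustrative): accesses on the root bus go
 * straight to the DBI space via dw_pcie_ops, while accesses to downstream
 * devices are tunnelled through the first outbound iATU window:
 *
 *	root bus      : map_bus()  -> pci->dbi_base + where
 *	child bus     : CFG0 iATU  -> pp->va_cfg0_base + where
 *	deeper buses  : CFG1 iATU  -> pp->va_cfg0_base + where
 *
 * The CFG0/CFG1 split mirrors PCIe Type 0 vs Type 1 configuration requests.
 */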

static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all outbound/inbound windows are disabled before proceeding
	 * with the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		atu.index = i;
		atu.type = PCIE_ATU_TYPE_MEM;
		atu.cpu_addr = entry->res->start;
		atu.pci_addr = entry->res->start - entry->offset;

		/* Adjust iATU size if MSG TLP region was allocated before */
		if (pp->msg_res && pp->msg_res->parent == entry->res)
			atu.size = resource_size(entry->res) -
				   resource_size(pp->msg_res);
		else
			atu.size = resource_size(entry->res);

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			atu.index = i;
			atu.type = PCIE_ATU_TYPE_IO;
			atu.cpu_addr = pp->io_base;
			atu.pci_addr = pp->io_bus_addr;
			atu.size = pp->io_size;

			ret = dw_pcie_prog_outbound_atu(pci, &atu);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	pp->msg_atu_index = i;

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}
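
/*
 * Example outbound window assignment (hypothetical controller with four
 * windows, two bridge MEM ranges and one IO range):
 *
 *	index 0: CFG0/CFG1 accesses (see dw_pcie_other_conf_map_bus())
 *	index 1: MEM range #1
 *	index 2: MEM range #2
 *	index 3: IO range (shared with CFG0 instead if no window is left)
 *
 * pp->msg_atu_index records the highest index handed out above; it is
 * reused by dw_pcie_pme_turn_off() for the PME_Turn_Off Msg window.
 */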

int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers: primary 0, secondary 1, subordinate 0xff */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * the ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	void __iomem *mem;
	int ret;

	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
		return -ENOSPC;

	if (!pci->pp.msg_res)
		return -ENOSPC;

	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
	atu.routing = PCIE_MSG_TYPE_R_BC;
	atu.type = PCIE_ATU_TYPE_MSG;
	atu.size = resource_size(pci->pp.msg_res);
	atu.index = pci->pp.msg_atu_index;

	atu.cpu_addr = pci->pp.msg_res->start;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return ret;

	mem = ioremap(pci->pp.msg_res->start, pci->region_align);
	if (!mem)
		return -ENOMEM;

	/* A dummy write is converted to a Msg TLP */
	writel(0, mem);

	iounmap(mem);

	return 0;
}
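
/*
 * Flow sketch (illustrative): dw_pcie_pme_turn_off() retargets the spare
 * outbound window so that a plain CPU store becomes a broadcast message:
 *
 *	CPU writel(0, mem)
 *	  -> hits the PCIE_ATU_TYPE_MSG window at pp->msg_res->start
 *	  -> emitted on the link as a PME_Turn_Off Msg TLP (broadcast routing)
 *	  -> downstream devices answer with PME_TO_Ack, letting the LTSSM
 *	     settle into L2 before dw_pcie_suspend_noirq() polls for it.
 */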

int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret = 0;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (pci->pp.ops->pme_turn_off)
		pci->pp.ops->pme_turn_off(&pci->pp);
	else
		ret = dw_pcie_pme_turn_off(pci);

	if (ret)
		return ret;

	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	dw_pcie_stop_link(pci);
	if (pci->pp.ops->deinit)
		pci->pp.ops->deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);

int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->init) {
		ret = pci->pp.ops->init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	return dw_pcie_wait_for_link(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
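
/*
 * Typical wiring in a glue driver (hypothetical sketch; struct foo_pcie and
 * the foo_* callbacks are illustrative, not part of this file). The helpers
 * above are meant to be called from noirq PM callbacks, e.g.:
 *
 *	static int foo_pcie_suspend_noirq(struct device *dev)
 *	{
 *		struct foo_pcie *foo = dev_get_drvdata(dev);
 *
 *		return dw_pcie_suspend_noirq(&foo->pci);
 *	}
 *
 *	static const struct dev_pm_ops foo_pcie_pm_ops = {
 *		NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_pcie_suspend_noirq,
 *					  foo_pcie_resume_noirq)
 *	};
 */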