Lines Matching +full:host +full:- +full:port
1 // SPDX-License-Identifier: GPL-2.0
3 * MediaTek PCIe host controller driver.
35 /* PCIe per-port registers */
71 /* PCIe V2 per-port registers */
118 (GENMASK(((size) - 1), 0) << ((where) & 0x3))
136 * struct mtk_pcie_soc - differentiate between host generations
137 * @need_fix_class_id: whether this host's class ID needs to be fixed
138 * @need_fix_device_id: whether this host's device ID needs to be fixed
139 * @device_id: device ID to program for this host when the fixup is needed
149 int (*startup)(struct mtk_pcie_port *port);
150 int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
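The kernel-doc above describes the per-generation hook table. Below is a condensed sketch of struct mtk_pcie_soc, limited to the members named in this listing (the ops pointer is inferred from host->ops = pcie->soc->ops further down); the field types are assumptions and the full definition in the driver may carry additional members.

	#include <linux/of.h>
	#include <linux/pci.h>

	struct mtk_pcie_port;

	/* Condensed sketch, not the driver's exact definition */
	struct mtk_pcie_soc {
		bool need_fix_class_id;
		bool need_fix_device_id;
		unsigned int device_id;
		struct pci_ops *ops;
		int (*startup)(struct mtk_pcie_port *port);
		int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
	};
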
154 * struct mtk_pcie_port - PCIe port information
156 * @list: port list
157 * @pcie: pointer to PCIe host info
158 * @reset: pointer to port reset control
169 * @slot: port slot
198 * struct mtk_pcie - PCIe host information
201 * @free_ck: free-run reference clock
204 * @mem: non-prefetchable memory resource
207 * @ports: pointer to PCIe port information
208 * @soc: pointer to SoC-dependent operations
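For orientation, a condensed sketch of the two structures documented above, restricted to members that actually appear in the lines of this listing; field types and ordering are assumptions, and the full definitions in the driver have more members (MTK_MSI_IRQS_NUM is the driver's own constant, referenced later in the MSI code).

	#include <linux/clk.h>
	#include <linux/irqdomain.h>
	#include <linux/mutex.h>
	#include <linux/phy/phy.h>
	#include <linux/reset.h>

	/* Condensed sketch, not the driver's exact definitions */
	struct mtk_pcie_port {
		void __iomem *base;
		struct list_head list;
		struct mtk_pcie *pcie;
		struct reset_control *reset;
		struct clk *sys_ck, *ahb_ck, *axi_ck, *aux_ck, *obff_ck, *pipe_ck;
		struct phy *phy;
		u32 lane;
		u32 slot;
		struct irq_domain *irq_domain;		/* INTx */
		struct irq_domain *inner_domain;	/* MSI parent */
		struct irq_domain *msi_domain;
		struct mutex lock;
		DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
	};

	struct mtk_pcie {
		struct device *dev;
		void __iomem *base;
		struct clk *free_ck;
		struct resource io, pio, mem, busn;
		struct {
			resource_size_t io;
			resource_size_t mem;
		} offset;
		struct list_head ports;
		const struct mtk_pcie_soc *soc;
	};
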
229 struct device *dev = pcie->dev; in mtk_pcie_subsys_powerdown()
231 clk_disable_unprepare(pcie->free_ck); in mtk_pcie_subsys_powerdown()
233 if (dev->pm_domain) { in mtk_pcie_subsys_powerdown()
239 static void mtk_pcie_port_free(struct mtk_pcie_port *port) in mtk_pcie_port_free() argument
241 struct mtk_pcie *pcie = port->pcie; in mtk_pcie_port_free()
242 struct device *dev = pcie->dev; in mtk_pcie_port_free()
244 devm_iounmap(dev, port->base); in mtk_pcie_port_free()
245 list_del(&port->list); in mtk_pcie_port_free()
246 devm_kfree(dev, port); in mtk_pcie_port_free()
251 struct mtk_pcie_port *port, *tmp; in mtk_pcie_put_resources() local
253 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { in mtk_pcie_put_resources()
254 phy_power_off(port->phy); in mtk_pcie_put_resources()
255 phy_exit(port->phy); in mtk_pcie_put_resources()
256 clk_disable_unprepare(port->pipe_ck); in mtk_pcie_put_resources()
257 clk_disable_unprepare(port->obff_ck); in mtk_pcie_put_resources()
258 clk_disable_unprepare(port->axi_ck); in mtk_pcie_put_resources()
259 clk_disable_unprepare(port->aux_ck); in mtk_pcie_put_resources()
260 clk_disable_unprepare(port->ahb_ck); in mtk_pcie_put_resources()
261 clk_disable_unprepare(port->sys_ck); in mtk_pcie_put_resources()
262 mtk_pcie_port_free(port); in mtk_pcie_put_resources()
268 static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) in mtk_pcie_check_cfg_cpld() argument
273 err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val, in mtk_pcie_check_cfg_cpld()
279 if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) in mtk_pcie_check_cfg_cpld()
285 static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, in mtk_pcie_hw_rd_cfg() argument
292 port->base + PCIE_CFG_HEADER0); in mtk_pcie_hw_rd_cfg()
293 writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); in mtk_pcie_hw_rd_cfg()
295 port->base + PCIE_CFG_HEADER2); in mtk_pcie_hw_rd_cfg()
298 tmp = readl(port->base + PCIE_APP_TLP_REQ); in mtk_pcie_hw_rd_cfg()
300 writel(tmp, port->base + PCIE_APP_TLP_REQ); in mtk_pcie_hw_rd_cfg()
303 if (mtk_pcie_check_cfg_cpld(port)) in mtk_pcie_hw_rd_cfg()
307 *val = readl(port->base + PCIE_CFG_RDATA); in mtk_pcie_hw_rd_cfg()
317 static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, in mtk_pcie_hw_wr_cfg() argument
322 port->base + PCIE_CFG_HEADER0); in mtk_pcie_hw_wr_cfg()
323 writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); in mtk_pcie_hw_wr_cfg()
325 port->base + PCIE_CFG_HEADER2); in mtk_pcie_hw_wr_cfg()
329 writel(val, port->base + PCIE_CFG_WDATA); in mtk_pcie_hw_wr_cfg()
332 val = readl(port->base + PCIE_APP_TLP_REQ); in mtk_pcie_hw_wr_cfg()
334 writel(val, port->base + PCIE_APP_TLP_REQ); in mtk_pcie_hw_wr_cfg()
337 return mtk_pcie_check_cfg_cpld(port); in mtk_pcie_hw_wr_cfg()
343 struct mtk_pcie *pcie = bus->sysdata; in mtk_pcie_find_port()
344 struct mtk_pcie_port *port; in mtk_pcie_find_port() local
349 * of the port in the root bus. in mtk_pcie_find_port()
351 while (bus && bus->number) { in mtk_pcie_find_port()
352 dev = bus->self; in mtk_pcie_find_port()
353 bus = dev->bus; in mtk_pcie_find_port()
354 devfn = dev->devfn; in mtk_pcie_find_port()
357 list_for_each_entry(port, &pcie->ports, list) in mtk_pcie_find_port()
358 if (port->slot == PCI_SLOT(devfn)) in mtk_pcie_find_port()
359 return port; in mtk_pcie_find_port()
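mtk_pcie_find_port() walks up from the requesting bus to the root bus and then matches the root port's slot against PCI_SLOT(devfn). The stand-alone demo below shows the devfn split those standard macros perform; the macro bodies mirror the kernel's definitions, and the program itself is purely illustrative.

	#include <stdio.h>

	#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)	((devfn) & 0x07)

	int main(void)
	{
		unsigned int devfn = (1 << 3) | 0;	/* device 1, function 0 */

		printf("slot %u func %u\n", PCI_SLOT(devfn), PCI_FUNC(devfn));
		return 0;
	}
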
367 struct mtk_pcie_port *port; in mtk_pcie_config_read() local
368 u32 bn = bus->number; in mtk_pcie_config_read()
371 port = mtk_pcie_find_port(bus, devfn); in mtk_pcie_config_read()
372 if (!port) { in mtk_pcie_config_read()
377 ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); in mtk_pcie_config_read()
387 struct mtk_pcie_port *port; in mtk_pcie_config_write() local
388 u32 bn = bus->number; in mtk_pcie_config_write()
390 port = mtk_pcie_find_port(bus, devfn); in mtk_pcie_config_write()
391 if (!port) in mtk_pcie_config_write()
394 return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val); in mtk_pcie_config_write()
404 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); in mtk_compose_msi_msg() local
407 /* MT2712/MT7622 only support 32-bit MSI addresses */ in mtk_compose_msi_msg()
408 addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); in mtk_compose_msi_msg()
409 msg->address_hi = 0; in mtk_compose_msi_msg()
410 msg->address_lo = lower_32_bits(addr); in mtk_compose_msi_msg()
412 msg->data = data->hwirq; in mtk_compose_msi_msg()
414 dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n", in mtk_compose_msi_msg()
415 (int)data->hwirq, msg->address_hi, msg->address_lo); in mtk_compose_msi_msg()
421 return -EINVAL; in mtk_msi_set_affinity()
426 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); in mtk_msi_ack_irq() local
427 u32 hwirq = data->hwirq; in mtk_msi_ack_irq()
429 writel(1 << hwirq, port->base + PCIE_IMSI_STATUS); in mtk_msi_ack_irq()
442 struct mtk_pcie_port *port = domain->host_data; in mtk_pcie_irq_domain_alloc() local
446 mutex_lock(&port->lock); in mtk_pcie_irq_domain_alloc()
448 bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); in mtk_pcie_irq_domain_alloc()
450 mutex_unlock(&port->lock); in mtk_pcie_irq_domain_alloc()
451 return -ENOSPC; in mtk_pcie_irq_domain_alloc()
454 __set_bit(bit, port->msi_irq_in_use); in mtk_pcie_irq_domain_alloc()
456 mutex_unlock(&port->lock); in mtk_pcie_irq_domain_alloc()
459 domain->host_data, handle_edge_irq, in mtk_pcie_irq_domain_alloc()
469 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d); in mtk_pcie_irq_domain_free() local
471 mutex_lock(&port->lock); in mtk_pcie_irq_domain_free()
473 if (!test_bit(d->hwirq, port->msi_irq_in_use)) in mtk_pcie_irq_domain_free()
474 dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n", in mtk_pcie_irq_domain_free()
475 d->hwirq); in mtk_pcie_irq_domain_free()
477 __clear_bit(d->hwirq, port->msi_irq_in_use); in mtk_pcie_irq_domain_free()
479 mutex_unlock(&port->lock); in mtk_pcie_irq_domain_free()
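The MSI allocator above hands out hwirqs from a small bitmap: find_first_zero_bit() under port->lock picks the lowest free vector, __set_bit() claims it, and __clear_bit() releases it on free. A user-space sketch of the same bookkeeping follows; the value of MTK_MSI_IRQS_NUM is assumed here purely for illustration.

	#include <stdio.h>

	#define MTK_MSI_IRQS_NUM 32	/* illustrative value, not taken from this listing */

	static unsigned long msi_irq_in_use;

	static int alloc_msi_bit(void)
	{
		int bit;

		for (bit = 0; bit < MTK_MSI_IRQS_NUM; bit++) {
			if (!(msi_irq_in_use & (1UL << bit))) {
				msi_irq_in_use |= 1UL << bit;	/* __set_bit() */
				return bit;
			}
		}
		return -1;	/* -ENOSPC in the driver */
	}

	int main(void)
	{
		printf("first vector: %d\n", alloc_msi_bit());	/* 0 */
		printf("second vector: %d\n", alloc_msi_bit());	/* 1 */
		return 0;
	}
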
502 static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) in mtk_pcie_allocate_msi_domains() argument
504 struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node); in mtk_pcie_allocate_msi_domains()
506 mutex_init(&port->lock); in mtk_pcie_allocate_msi_domains()
508 port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM, in mtk_pcie_allocate_msi_domains()
509 &msi_domain_ops, port); in mtk_pcie_allocate_msi_domains()
510 if (!port->inner_domain) { in mtk_pcie_allocate_msi_domains()
511 dev_err(port->pcie->dev, "failed to create IRQ domain\n"); in mtk_pcie_allocate_msi_domains()
512 return -ENOMEM; in mtk_pcie_allocate_msi_domains()
515 port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info, in mtk_pcie_allocate_msi_domains()
516 port->inner_domain); in mtk_pcie_allocate_msi_domains()
517 if (!port->msi_domain) { in mtk_pcie_allocate_msi_domains()
518 dev_err(port->pcie->dev, "failed to create MSI domain\n"); in mtk_pcie_allocate_msi_domains()
519 irq_domain_remove(port->inner_domain); in mtk_pcie_allocate_msi_domains()
520 return -ENOMEM; in mtk_pcie_allocate_msi_domains()
526 static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) in mtk_pcie_enable_msi() argument
531 msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); in mtk_pcie_enable_msi()
533 writel(val, port->base + PCIE_IMSI_ADDR); in mtk_pcie_enable_msi()
535 val = readl(port->base + PCIE_INT_MASK); in mtk_pcie_enable_msi()
537 writel(val, port->base + PCIE_INT_MASK); in mtk_pcie_enable_msi()
544 irq_set_chip_data(irq, domain->host_data); in mtk_pcie_intx_map()
553 static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, in mtk_pcie_init_irq_domain() argument
556 struct device *dev = port->pcie->dev; in mtk_pcie_init_irq_domain()
564 return -ENODEV; in mtk_pcie_init_irq_domain()
567 port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, in mtk_pcie_init_irq_domain()
568 &intx_domain_ops, port); in mtk_pcie_init_irq_domain()
569 if (!port->irq_domain) { in mtk_pcie_init_irq_domain()
571 return -ENODEV; in mtk_pcie_init_irq_domain()
575 ret = mtk_pcie_allocate_msi_domains(port); in mtk_pcie_init_irq_domain()
585 struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); in mtk_pcie_intr_handler() local
593 status = readl(port->base + PCIE_INT_STATUS); in mtk_pcie_intr_handler()
597 writel(1 << bit, port->base + PCIE_INT_STATUS); in mtk_pcie_intr_handler()
598 virq = irq_find_mapping(port->irq_domain, in mtk_pcie_intr_handler()
599 bit - INTX_SHIFT); in mtk_pcie_intr_handler()
608 while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { in mtk_pcie_intr_handler()
610 virq = irq_find_mapping(port->inner_domain, bit); in mtk_pcie_intr_handler()
615 writel(MSI_STATUS, port->base + PCIE_INT_STATUS); in mtk_pcie_intr_handler()
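After acknowledging the aggregated MSI_STATUS bit, the handler above re-reads PCIE_IMSI_STATUS and dispatches one interrupt per set bit. A minimal user-space illustration of that bit scan (the status value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int imsi_status = 0x0000000a;	/* example: vectors 1 and 3 pending */
		int bit;

		for (bit = 0; bit < 32; bit++)
			if (imsi_status & (1u << bit))
				printf("dispatch MSI vector %d\n", bit);

		return 0;
	}
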
624 static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, in mtk_pcie_setup_irq() argument
627 struct mtk_pcie *pcie = port->pcie; in mtk_pcie_setup_irq()
628 struct device *dev = pcie->dev; in mtk_pcie_setup_irq()
632 err = mtk_pcie_init_irq_domain(port, node); in mtk_pcie_setup_irq()
638 irq = platform_get_irq(pdev, port->slot); in mtk_pcie_setup_irq()
639 irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port); in mtk_pcie_setup_irq()
644 static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) in mtk_pcie_startup_port_v2() argument
646 struct mtk_pcie *pcie = port->pcie; in mtk_pcie_startup_port_v2()
647 struct resource *mem = &pcie->mem; in mtk_pcie_startup_port_v2()
648 const struct mtk_pcie_soc *soc = port->pcie->soc; in mtk_pcie_startup_port_v2()
654 if (pcie->base) { in mtk_pcie_startup_port_v2()
655 val = readl(pcie->base + PCIE_SYS_CFG_V2); in mtk_pcie_startup_port_v2()
656 val |= PCIE_CSR_LTSSM_EN(port->slot) | in mtk_pcie_startup_port_v2()
657 PCIE_CSR_ASPM_L1_EN(port->slot); in mtk_pcie_startup_port_v2()
658 writel(val, pcie->base + PCIE_SYS_CFG_V2); in mtk_pcie_startup_port_v2()
662 writel(0, port->base + PCIE_RST_CTRL); in mtk_pcie_startup_port_v2()
669 writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); in mtk_pcie_startup_port_v2()
671 /* De-assert PHY, PE, PIPE, MAC and configuration reset */ in mtk_pcie_startup_port_v2()
672 val = readl(port->base + PCIE_RST_CTRL); in mtk_pcie_startup_port_v2()
675 writel(val, port->base + PCIE_RST_CTRL); in mtk_pcie_startup_port_v2()
678 if (soc->need_fix_class_id) { in mtk_pcie_startup_port_v2()
680 writew(val, port->base + PCIE_CONF_VEND_ID); in mtk_pcie_startup_port_v2()
683 writew(val, port->base + PCIE_CONF_CLASS_ID); in mtk_pcie_startup_port_v2()
686 if (soc->need_fix_device_id) in mtk_pcie_startup_port_v2()
687 writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID); in mtk_pcie_startup_port_v2()
690 err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, in mtk_pcie_startup_port_v2()
694 return -ETIMEDOUT; in mtk_pcie_startup_port_v2()
697 val = readl(port->base + PCIE_INT_MASK); in mtk_pcie_startup_port_v2()
699 writel(val, port->base + PCIE_INT_MASK); in mtk_pcie_startup_port_v2()
702 mtk_pcie_enable_msi(port); in mtk_pcie_startup_port_v2()
705 size = mem->end - mem->start; in mtk_pcie_startup_port_v2()
706 val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); in mtk_pcie_startup_port_v2()
707 writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); in mtk_pcie_startup_port_v2()
709 val = upper_32_bits(mem->start); in mtk_pcie_startup_port_v2()
710 writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); in mtk_pcie_startup_port_v2()
714 writel(val, port->base + PCIE_AXI_WINDOW0); in mtk_pcie_startup_port_v2()
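The AHB-to-PCIe window above is programmed with AHB2PCIE_SIZE(fls(size)), where fls() is the kernel's 1-based find-last-set. A quick user-space check of what fls() yields when end - start is 0x0fffffff (a 256 MiB window); the fls() stand-in uses __builtin_clz and is illustrative only.

	#include <stdio.h>

	/* 1-based find-last-set, matching the kernel's fls() semantics */
	static int fls(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		printf("fls(0x0fffffff) = %d\n", fls(0x0fffffffu));	/* prints 28 */
		return 0;
	}
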
722 struct mtk_pcie *pcie = bus->sysdata; in mtk_pcie_map_bus()
725 bus->number), pcie->base + PCIE_CFG_ADDR); in mtk_pcie_map_bus()
727 return pcie->base + PCIE_CFG_DATA + (where & 3); in mtk_pcie_map_bus()
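For the v1 hosts, mtk_pcie_map_bus() programs PCIE_CFG_ADDR and returns a pointer into the PCIE_CFG_DATA window; the driver then relies on the kernel's generic accessors to perform the actual access at that address. Below is a simplified sketch of what such a map_bus-based read looks like, assuming the usual pci_generic_config_read() behaviour rather than quoting its exact source.

	#include <linux/pci.h>

	/* Simplified sketch of a map_bus-driven config read */
	static int map_bus_read_sketch(struct pci_bus *bus, unsigned int devfn,
				       int where, int size, u32 *val)
	{
		void __iomem *addr = bus->ops->map_bus(bus, devfn, where);

		if (!addr)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (size == 1)
			*val = readb(addr);
		else if (size == 2)
			*val = readw(addr);
		else
			*val = readl(addr);

		return PCIBIOS_SUCCESSFUL;
	}
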
736 static int mtk_pcie_startup_port(struct mtk_pcie_port *port) in mtk_pcie_startup_port() argument
738 struct mtk_pcie *pcie = port->pcie; in mtk_pcie_startup_port()
739 u32 func = PCI_FUNC(port->slot << 3); in mtk_pcie_startup_port()
740 u32 slot = PCI_SLOT(port->slot << 3); in mtk_pcie_startup_port()
744 /* assert port PERST_N */ in mtk_pcie_startup_port()
745 val = readl(pcie->base + PCIE_SYS_CFG); in mtk_pcie_startup_port()
746 val |= PCIE_PORT_PERST(port->slot); in mtk_pcie_startup_port()
747 writel(val, pcie->base + PCIE_SYS_CFG); in mtk_pcie_startup_port()
749 /* de-assert port PERST_N */ in mtk_pcie_startup_port()
750 val = readl(pcie->base + PCIE_SYS_CFG); in mtk_pcie_startup_port()
751 val &= ~PCIE_PORT_PERST(port->slot); in mtk_pcie_startup_port()
752 writel(val, pcie->base + PCIE_SYS_CFG); in mtk_pcie_startup_port()
755 err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val, in mtk_pcie_startup_port()
759 return -ETIMEDOUT; in mtk_pcie_startup_port()
762 val = readl(pcie->base + PCIE_INT_ENABLE); in mtk_pcie_startup_port()
763 val |= PCIE_PORT_INT_EN(port->slot); in mtk_pcie_startup_port()
764 writel(val, pcie->base + PCIE_INT_ENABLE); in mtk_pcie_startup_port()
768 port->base + PCIE_BAR0_SETUP); in mtk_pcie_startup_port()
771 writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); in mtk_pcie_startup_port()
775 pcie->base + PCIE_CFG_ADDR); in mtk_pcie_startup_port()
776 val = readl(pcie->base + PCIE_CFG_DATA); in mtk_pcie_startup_port()
780 pcie->base + PCIE_CFG_ADDR); in mtk_pcie_startup_port()
781 writel(val, pcie->base + PCIE_CFG_DATA); in mtk_pcie_startup_port()
785 pcie->base + PCIE_CFG_ADDR); in mtk_pcie_startup_port()
786 val = readl(pcie->base + PCIE_CFG_DATA); in mtk_pcie_startup_port()
790 pcie->base + PCIE_CFG_ADDR); in mtk_pcie_startup_port()
791 writel(val, pcie->base + PCIE_CFG_DATA); in mtk_pcie_startup_port()
796 static void mtk_pcie_enable_port(struct mtk_pcie_port *port) in mtk_pcie_enable_port() argument
798 struct mtk_pcie *pcie = port->pcie; in mtk_pcie_enable_port()
799 struct device *dev = pcie->dev; in mtk_pcie_enable_port()
802 err = clk_prepare_enable(port->sys_ck); in mtk_pcie_enable_port()
804 dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot); in mtk_pcie_enable_port()
808 err = clk_prepare_enable(port->ahb_ck); in mtk_pcie_enable_port()
810 dev_err(dev, "failed to enable ahb_ck%d\n", port->slot); in mtk_pcie_enable_port()
814 err = clk_prepare_enable(port->aux_ck); in mtk_pcie_enable_port()
816 dev_err(dev, "failed to enable aux_ck%d\n", port->slot); in mtk_pcie_enable_port()
820 err = clk_prepare_enable(port->axi_ck); in mtk_pcie_enable_port()
822 dev_err(dev, "failed to enable axi_ck%d\n", port->slot); in mtk_pcie_enable_port()
826 err = clk_prepare_enable(port->obff_ck); in mtk_pcie_enable_port()
828 dev_err(dev, "failed to enable obff_ck%d\n", port->slot); in mtk_pcie_enable_port()
832 err = clk_prepare_enable(port->pipe_ck); in mtk_pcie_enable_port()
834 dev_err(dev, "failed to enable pipe_ck%d\n", port->slot); in mtk_pcie_enable_port()
838 reset_control_assert(port->reset); in mtk_pcie_enable_port()
839 reset_control_deassert(port->reset); in mtk_pcie_enable_port()
841 err = phy_init(port->phy); in mtk_pcie_enable_port()
843 dev_err(dev, "failed to initialize port%d phy\n", port->slot); in mtk_pcie_enable_port()
847 err = phy_power_on(port->phy); in mtk_pcie_enable_port()
849 dev_err(dev, "failed to power on port%d phy\n", port->slot); in mtk_pcie_enable_port()
853 if (!pcie->soc->startup(port)) in mtk_pcie_enable_port()
856 dev_info(dev, "Port%d link down\n", port->slot); in mtk_pcie_enable_port()
858 phy_power_off(port->phy); in mtk_pcie_enable_port()
860 phy_exit(port->phy); in mtk_pcie_enable_port()
862 clk_disable_unprepare(port->pipe_ck); in mtk_pcie_enable_port()
864 clk_disable_unprepare(port->obff_ck); in mtk_pcie_enable_port()
866 clk_disable_unprepare(port->axi_ck); in mtk_pcie_enable_port()
868 clk_disable_unprepare(port->aux_ck); in mtk_pcie_enable_port()
870 clk_disable_unprepare(port->ahb_ck); in mtk_pcie_enable_port()
872 clk_disable_unprepare(port->sys_ck); in mtk_pcie_enable_port()
874 mtk_pcie_port_free(port); in mtk_pcie_enable_port()
881 struct mtk_pcie_port *port; in mtk_pcie_parse_port() local
883 struct device *dev = pcie->dev; in mtk_pcie_parse_port()
888 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); in mtk_pcie_parse_port()
889 if (!port) in mtk_pcie_parse_port()
890 return -ENOMEM; in mtk_pcie_parse_port()
892 err = of_property_read_u32(node, "num-lanes", &port->lane); in mtk_pcie_parse_port()
894 dev_err(dev, "missing num-lanes property\n"); in mtk_pcie_parse_port()
898 snprintf(name, sizeof(name), "port%d", slot); in mtk_pcie_parse_port()
900 port->base = devm_ioremap_resource(dev, regs); in mtk_pcie_parse_port()
901 if (IS_ERR(port->base)) { in mtk_pcie_parse_port()
902 dev_err(dev, "failed to map port%d base\n", slot); in mtk_pcie_parse_port()
903 return PTR_ERR(port->base); in mtk_pcie_parse_port()
907 port->sys_ck = devm_clk_get(dev, name); in mtk_pcie_parse_port()
908 if (IS_ERR(port->sys_ck)) { in mtk_pcie_parse_port()
910 return PTR_ERR(port->sys_ck); in mtk_pcie_parse_port()
915 port->ahb_ck = devm_clk_get(dev, name); in mtk_pcie_parse_port()
916 if (IS_ERR(port->ahb_ck)) { in mtk_pcie_parse_port()
917 if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) in mtk_pcie_parse_port()
918 return -EPROBE_DEFER; in mtk_pcie_parse_port()
920 port->ahb_ck = NULL; in mtk_pcie_parse_port()
924 port->axi_ck = devm_clk_get(dev, name); in mtk_pcie_parse_port()
925 if (IS_ERR(port->axi_ck)) { in mtk_pcie_parse_port()
926 if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) in mtk_pcie_parse_port()
927 return -EPROBE_DEFER; in mtk_pcie_parse_port()
929 port->axi_ck = NULL; in mtk_pcie_parse_port()
933 port->aux_ck = devm_clk_get(dev, name); in mtk_pcie_parse_port()
934 if (IS_ERR(port->aux_ck)) { in mtk_pcie_parse_port()
935 if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) in mtk_pcie_parse_port()
936 return -EPROBE_DEFER; in mtk_pcie_parse_port()
938 port->aux_ck = NULL; in mtk_pcie_parse_port()
942 port->obff_ck = devm_clk_get(dev, name); in mtk_pcie_parse_port()
943 if (IS_ERR(port->obff_ck)) { in mtk_pcie_parse_port()
944 if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) in mtk_pcie_parse_port()
945 return -EPROBE_DEFER; in mtk_pcie_parse_port()
947 port->obff_ck = NULL; in mtk_pcie_parse_port()
951 port->pipe_ck = devm_clk_get(dev, name); in mtk_pcie_parse_port()
952 if (IS_ERR(port->pipe_ck)) { in mtk_pcie_parse_port()
953 if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) in mtk_pcie_parse_port()
954 return -EPROBE_DEFER; in mtk_pcie_parse_port()
956 port->pipe_ck = NULL; in mtk_pcie_parse_port()
959 snprintf(name, sizeof(name), "pcie-rst%d", slot); in mtk_pcie_parse_port()
960 port->reset = devm_reset_control_get_optional_exclusive(dev, name); in mtk_pcie_parse_port()
961 if (PTR_ERR(port->reset) == -EPROBE_DEFER) in mtk_pcie_parse_port()
962 return PTR_ERR(port->reset); in mtk_pcie_parse_port()
965 snprintf(name, sizeof(name), "pcie-phy%d", slot); in mtk_pcie_parse_port()
966 port->phy = devm_phy_optional_get(dev, name); in mtk_pcie_parse_port()
967 if (IS_ERR(port->phy)) in mtk_pcie_parse_port()
968 return PTR_ERR(port->phy); in mtk_pcie_parse_port()
970 port->slot = slot; in mtk_pcie_parse_port()
971 port->pcie = pcie; in mtk_pcie_parse_port()
973 if (pcie->soc->setup_irq) { in mtk_pcie_parse_port()
974 err = pcie->soc->setup_irq(port, node); in mtk_pcie_parse_port()
979 INIT_LIST_HEAD(&port->list); in mtk_pcie_parse_port()
980 list_add_tail(&port->list, &pcie->ports); in mtk_pcie_parse_port()
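Each optional per-port clock above is fetched with devm_clk_get() plus an explicit -EPROBE_DEFER/NULL fallback. On kernels that provide devm_clk_get_optional(), the same lookup can be written more compactly, since it returns NULL when the clock is simply not described in the device tree. This is a sketch of that alternative for one clock, not what the driver as listed does; note that, unlike the open-coded fallback, errors other than "clock absent" still abort the probe.

	/* Fragment in the context of mtk_pcie_parse_port(); "name" holds e.g. "ahb_ck0" */
	port->ahb_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->ahb_ck))
		return PTR_ERR(port->ahb_ck);
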
987 struct device *dev = pcie->dev; in mtk_pcie_subsys_powerup()
995 pcie->base = devm_ioremap_resource(dev, regs); in mtk_pcie_subsys_powerup()
996 if (IS_ERR(pcie->base)) { in mtk_pcie_subsys_powerup()
998 return PTR_ERR(pcie->base); in mtk_pcie_subsys_powerup()
1002 pcie->free_ck = devm_clk_get(dev, "free_ck"); in mtk_pcie_subsys_powerup()
1003 if (IS_ERR(pcie->free_ck)) { in mtk_pcie_subsys_powerup()
1004 if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) in mtk_pcie_subsys_powerup()
1005 return -EPROBE_DEFER; in mtk_pcie_subsys_powerup()
1007 pcie->free_ck = NULL; in mtk_pcie_subsys_powerup()
1010 if (dev->pm_domain) { in mtk_pcie_subsys_powerup()
1016 err = clk_prepare_enable(pcie->free_ck); in mtk_pcie_subsys_powerup()
1025 if (dev->pm_domain) { in mtk_pcie_subsys_powerup()
1035 struct device *dev = pcie->dev; in mtk_pcie_setup()
1036 struct device_node *node = dev->of_node, *child; in mtk_pcie_setup()
1040 struct mtk_pcie_port *port, *tmp; in mtk_pcie_setup() local
1045 return -EINVAL; in mtk_pcie_setup()
1055 pcie->offset.io = res.start - range.pci_addr; in mtk_pcie_setup()
1057 memcpy(&pcie->pio, &res, sizeof(res)); in mtk_pcie_setup()
1058 pcie->pio.name = node->full_name; in mtk_pcie_setup()
1060 pcie->io.start = range.cpu_addr; in mtk_pcie_setup()
1061 pcie->io.end = range.cpu_addr + range.size - 1; in mtk_pcie_setup()
1062 pcie->io.flags = IORESOURCE_MEM; in mtk_pcie_setup()
1063 pcie->io.name = "I/O"; in mtk_pcie_setup()
1065 memcpy(&res, &pcie->io, sizeof(res)); in mtk_pcie_setup()
1069 pcie->offset.mem = res.start - range.pci_addr; in mtk_pcie_setup()
1071 memcpy(&pcie->mem, &res, sizeof(res)); in mtk_pcie_setup()
1072 pcie->mem.name = "non-prefetchable"; in mtk_pcie_setup()
1077 err = of_pci_parse_bus_range(node, &pcie->busn); in mtk_pcie_setup()
1080 pcie->busn.name = node->name; in mtk_pcie_setup()
1081 pcie->busn.start = 0; in mtk_pcie_setup()
1082 pcie->busn.end = 0xff; in mtk_pcie_setup()
1083 pcie->busn.flags = IORESOURCE_BUS; in mtk_pcie_setup()
1106 /* enable each port, and then check link status */ in mtk_pcie_setup()
1107 list_for_each_entry_safe(port, tmp, &pcie->ports, list) in mtk_pcie_setup()
1108 mtk_pcie_enable_port(port); in mtk_pcie_setup()
1111 if (list_empty(&pcie->ports)) in mtk_pcie_setup()
1119 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); in mtk_pcie_request_resources() local
1120 struct list_head *windows = &host->windows; in mtk_pcie_request_resources()
1121 struct device *dev = pcie->dev; in mtk_pcie_request_resources()
1124 pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); in mtk_pcie_request_resources()
1125 pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); in mtk_pcie_request_resources()
1126 pci_add_resource(windows, &pcie->busn); in mtk_pcie_request_resources()
1132 err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); in mtk_pcie_request_resources()
1139 static int mtk_pcie_register_host(struct pci_host_bridge *host) in mtk_pcie_register_host() argument
1141 struct mtk_pcie *pcie = pci_host_bridge_priv(host); in mtk_pcie_register_host()
1145 host->busnr = pcie->busn.start; in mtk_pcie_register_host()
1146 host->dev.parent = pcie->dev; in mtk_pcie_register_host()
1147 host->ops = pcie->soc->ops; in mtk_pcie_register_host()
1148 host->map_irq = of_irq_parse_and_map_pci; in mtk_pcie_register_host()
1149 host->swizzle_irq = pci_common_swizzle; in mtk_pcie_register_host()
1150 host->sysdata = pcie; in mtk_pcie_register_host()
1152 err = pci_scan_root_bus_bridge(host); in mtk_pcie_register_host()
1156 pci_bus_size_bridges(host->bus); in mtk_pcie_register_host()
1157 pci_bus_assign_resources(host->bus); in mtk_pcie_register_host()
1159 list_for_each_entry(child, &host->bus->children, node) in mtk_pcie_register_host()
1162 pci_bus_add_devices(host->bus); in mtk_pcie_register_host()
1169 struct device *dev = &pdev->dev; in mtk_pcie_probe()
1171 struct pci_host_bridge *host; in mtk_pcie_probe() local
1174 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); in mtk_pcie_probe()
1175 if (!host) in mtk_pcie_probe()
1176 return -ENOMEM; in mtk_pcie_probe()
1178 pcie = pci_host_bridge_priv(host); in mtk_pcie_probe()
1180 pcie->dev = dev; in mtk_pcie_probe()
1181 pcie->soc = of_device_get_match_data(dev); in mtk_pcie_probe()
1183 INIT_LIST_HEAD(&pcie->ports); in mtk_pcie_probe()
1193 err = mtk_pcie_register_host(host); in mtk_pcie_probe()
1200 if (!list_empty(&pcie->ports)) in mtk_pcie_probe()
1234 { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
1235 { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
1236 { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
1237 { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
1238 { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
1245 .name = "mtk-pcie",