// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS		0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

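/*
 * Each supported IP revision needs its own set of clocks, resets and
 * regulators, so a resource struct is defined per revision; the active
 * one is selected via the union below, based on the compatible string.
 */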
#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

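/*
 * Per-revision hooks: get_resources() acquires clocks/resets/regulators
 * at probe time, init()/post_init() power the controller up around PHY
 * power-on, deinit()/post_deinit() undo that, and ltssm_enable() starts
 * link training.
 */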
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

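/* Helpers driving the endpoint PERST# GPIO around link bring-up */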
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

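/*
 * Start link training through the revision-specific LTSSM hook, then let
 * the DesignWare core poll until it reports the link up.
 */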
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

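/*
 * IP rev. 2.1.0 (IPQ8064/APQ8064): three required clocks (iface, core,
 * phy), two optional ones (aux, ref), six resets and three supplies.
 */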
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

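/*
 * 2.1.0 power-up sequence: assert all resets to reach a known state,
 * enable the supplies, deassert the resets in order, enable the bulk
 * clocks and finally program the PHY and bridge registers.
 */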
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

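/* IP rev. 1.0.0 (APQ8084): four named clocks, a core reset and vdda */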
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

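/* 1.0.0 bring-up: release the core reset, then enable clocks and vdda */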
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

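/*
 * From IP rev. 2.3.2 onwards link training is started through the PARF
 * LTSSM register instead of the ELBI block.
 */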
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

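/*
 * 2.3.2 (MSM8996) bring-up: regulators and bus clocks first; the pipe
 * clock is only enabled in post_init(), after the PHY is powered on.
 */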
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

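/*
 * IP rev. 2.4.0 (IPQ4019/QCS404): bulk clocks plus a large set of resets;
 * IPQ4019 also controls the PHY-related and secure resets from here.
 */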
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

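/*
 * 2.4.0 bring-up: assert every reset (with settle delays) to reach a
 * clean state, then deassert them in the required order before enabling
 * the clocks and programming PARF.
 */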
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

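/* IP rev. 2.3.3 (IPQ8074): five named clocks and seven named resets */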
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

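/*
 * 2.3.3 bring-up: cycle all resets, enable the clocks, then program PARF
 * and, with DBI_RO_WR_EN set, fix up read-only config space registers.
 */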
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

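/*
 * IP rev. 2.7.0 (SDM845): six bulk clocks plus a pipe clock gated around
 * PHY power, one pci reset and two supplies.
 */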
static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";
	res->clks[4].id = "slave_q2a";
	res->clks[5].id = "tbu";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		goto err_disable_clocks;
	}

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}

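/* The link is up once LNKSTA reports Data Link Layer Link Active */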
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

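/*
 * DesignWare host_init hook: hold the endpoint in PERST# while powering
 * up the controller and PHY, then release it and wait for the link.
 */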
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);
	dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

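/*
 * Probe: map the parf, dbi and elbi regions, pick the per-revision ops
 * from the compatible match data, acquire the resources and register the
 * host bridge.
 */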
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		goto err_pm_runtime_put;
	}

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		pm_runtime_disable(&pdev->dev);
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ }
};

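/*
 * These root ports appear not to advertise a PCI bridge class code, so
 * force it early in enumeration.
 */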
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);