/*
 * DesignWare application register space functions for Keystone PCI controller
 *
 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
 *		http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include "pci-keystone.h"

/* Application register defines */
#define LTSSM_EN_VAL			1
#define LTSSM_STATE_MASK		0x1f
#define LTSSM_STATE_L0			0x11
#define DBI_CS2_EN_VAL			0x20
#define OB_XLAT_EN_VAL			2

/* Application registers */
#define CMD_STATUS			0x004
#define CFG_SETUP			0x008
#define OB_SIZE				0x030
#define CFG_PCIM_WIN_SZ_IDX		3
#define CFG_PCIM_WIN_CNT		32
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))

/* IRQ register defines */
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4

/* Error IRQ bits */
#define ERR_AER		BIT(5)	/* ECRC error */
#define ERR_AXI		BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR	BIT(3)	/* Correctable error */
#define ERR_NONFATAL	BIT(2)	/* Non-fatal error */
#define ERR_FATAL	BIT(1)	/* Fatal error */
#define ERR_SYS		BIT(0)	/* System (fatal, non-fatal, or correctable) */
#define ERR_IRQ_ALL	(ERR_AER | ERR_AXI | ERR_CORR | \
			 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ	(ERR_FATAL | ERR_AXI)
#define ERR_IRQ_STATUS_RAW		0x1c0
#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_IRQ_ENABLE_CLR		0x1cc

/* Config space registers */
#define DEBUG0				0x728

#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)

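/*
 * The 32 MSI vectors are spread across eight 4-bit MSI_IRQ register banks:
 * vector N is tracked in bank (N % 8) at bit (N / 8).  For example, vector
 * 10 lands in bank 2, bit 1.
 */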
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					     u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}

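/*
 * The physical address of the MSI_IRQ application register doubles as the
 * MSI target address programmed into endpoints: posted writes to it are
 * what raise MSI interrupts at the RC.
 */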
phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	return ks_pcie->app.start + MSI_IRQ;
}

static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct device *dev = pp->dev;
	u32 pending, vector;
	int src, virq;

	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bits 0-3 cover vectors 0, 8, 16, 24; MSI1 status bits
	 * cover vectors 1, 9, 17, 25; and so forth.
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}

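/*
 * The irq_chip callbacks below recover the hardware vector number from the
 * Linux virq by subtracting the virq of hwirq 0, which relies on
 * ks_dw_pcie_msi_host_init() having created the mappings contiguously and
 * in order.
 */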
static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	ks_pcie = to_keystone_pcie(pp);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
			 BIT(bit_pos));
	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}

void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
			 BIT(bit_pos));
}

void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
			 BIT(bit_pos));
}

static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	ks_pcie = to_keystone_pcie(pp);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Mask at the endpoint if per-vector masking (PVM) is implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_mask_irq(d);
	}

	ks_dw_pcie_msi_clear_irq(pp, offset);
}

static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	ks_pcie = to_keystone_pcie(pp);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Unmask at the endpoint if per-vector masking (PVM) is implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_unmask_irq(d);
	}

	ks_dw_pcie_msi_set_irq(pp, offset);
}

static struct irq_chip ks_dw_pcie_msi_irq_chip = {
	.name = "Keystone-PCIe-MSI-IRQ",
	.irq_ack = ks_dw_pcie_msi_irq_ack,
	.irq_mask = ks_dw_pcie_msi_irq_mask,
	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};

static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
	.map = ks_dw_pcie_msi_map,
};

int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	struct device *dev = pp->dev;
	int i;

	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
					MAX_MSI_IRQS,
					&ks_dw_pcie_msi_domain_ops,
					chip);
	if (!pp->irq_domain) {
		dev_err(dev, "irq domain init failed\n");
		return -ENXIO;
	}

	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_create_mapping(pp->irq_domain, i);

	return 0;
}

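/* Each legacy INTx line has its own bank of IRQ registers, 0x10 apart */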
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
	int i;

	for (i = 0; i < MAX_LEGACY_IRQS; i++)
		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}

void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct device *dev = pp->dev;
	u32 pending;
	int virq;

	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, "irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}

void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 status;

	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
	if (!status)
		return IRQ_NONE;

	if (status & ERR_FATAL_IRQ)
		dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n",
			status);

	/* Ack the IRQ; status bits are RW1C */
	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
	return IRQ_HANDLED;
}

static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

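/*
 * Nothing to do per IRQ above: the chained handler already EOIs the INTx
 * line in ks_dw_pcie_handle_legacy_irq(), so the irq_chip callbacks are
 * stubs.
 */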
static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};

static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 *
 * @ks_pcie: ptr to keystone_pcie structure
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);

	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}

/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 *
 * @ks_pcie: ptr to keystone_pcie structure
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);

	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}

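/*
 * With CFG_PCIM_WIN_SZ_IDX = 3 each outbound window spans 8 MB (1 << 3 MB),
 * so the 32 windows cover at most 256 MB of PCI memory space.
 */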
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 start = pp->mem->start, end = pp->mem->end;
	int i, tr_size;
	u32 val;

	/* Disable BARs for inbound access */
	ks_dw_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0);
	ks_dw_pcie_clear_dbi_mode(ks_pcie);

	/* Set outbound translation size per window division */
	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);

	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
		start += tr_size;
	}

	/* Enable OB translation */
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}

/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: Bus number the device is residing on
 * @devfn: device, function number info
 *
 * Forms and returns the address of configuration space mapped in PCIESS
 * address space 0.  Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions to access configuration - local and
 * remote.  We access the local region for bus 0 (as the RC is attached on
 * bus 0) and the remote region for everything else.  Devices on bus 1 sit on
 * our (logical) secondary bus and get TYPE 0 accesses; buses beyond 1 are
 * behind a bridge and need TYPE 1.  CFG_SETUP is needed only for remote
 * configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct pcie_port *pp = &ks_pcie->pp;
	u32 regval;

	if (bus == 0)
		return pp->dbi_base;

	regval = (bus << 16) | (device << 8) | function;

	/*
	 * Bus 1 is our virtual (secondary) bus, so it gets TYPE 0 accesses
	 * only; for any bus beyond it, set BIT(24) to request a TYPE 1
	 * configuration transaction.
	 */
	if (bus != 1)
		regval |= BIT(24);

	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
	return pp->va_cfg0_base;
}

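/*
 * All remote config reads and writes funnel through the single window
 * programmed by ks_pcie_cfg_setup(); the PCI core serializes config
 * accesses, so sharing one window is safe.
 */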
int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 *val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_cfg_read(addr + where, size, val);
}

int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_cfg_write(addr + where, size, val);
}

/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for the MSI_IRQ register
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	/* Configure and set up BAR0 */
	ks_dw_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0: in DBI mode these writes hit the BAR0 mask register */
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_dw_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient.  Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}

/**
 * ks_dw_pcie_link_up() - Check if link is up
 *
 * DEBUG0[4:0] holds the LTSSM state; the link is up once it reaches L0.
 */
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
	u32 val;

	val = dw_pcie_readl_rc(pp, DEBUG0);
	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}

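/*
 * Link training is (re)started by pulsing LTSSM_EN: the bit is cleared
 * first, presumably so that a retry still presents a fresh rising edge to
 * the LTSSM.
 */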
void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
	u32 val;

	/* Disable Link training */
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, val & ~LTSSM_EN_VAL);

	/* Initiate Link Training */
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}

/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize the legacy irq domain
 * and call dw_pcie_host_init() to initialize the Keystone
 * PCI host controller.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct device *dev = pp->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/*
	 * Point both config-window bases at the same remote window; the
	 * rd/wr_other_conf callbacks use them interchangeably.
	 */
	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					MAX_LEGACY_IRQS,
					&ks_dw_pcie_legacy_irq_domain_ops,
					NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}
561