• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * drivers/misc/spear13xx_pcie_gadget.c
3  *
4  * Copyright (C) 2010 ST Microelectronics
5  * Pratyush Anand<pratyush.anand@st.com>
6  *
7  * This file is licensed under the terms of the GNU General Public
8  * License version 2. This program is licensed "as is" without any
9  * warranty of any kind, whether express or implied.
10  */
11 
12 #include <linux/device.h>
13 #include <linux/clk.h>
14 #include <linux/slab.h>
15 #include <linux/delay.h>
16 #include <linux/io.h>
17 #include <linux/interrupt.h>
18 #include <linux/irq.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/pci_regs.h>
23 #include <linux/configfs.h>
24 #include <mach/pcie.h>
25 #include <mach/misc_regs.h>
26 
27 #define IN0_MEM_SIZE	(200 * 1024 * 1024 - 1)
/*
 * In the current implementation address translation is done using IN0 only.
 * So the IN1 start address and the IN0 end address have been kept the same.
 */
31 #define IN1_MEM_SIZE	(0 * 1024 * 1024 - 1)
32 #define IN_IO_SIZE	(20 * 1024 * 1024 - 1)
33 #define IN_CFG0_SIZE	(12 * 1024 * 1024 - 1)
34 #define IN_CFG1_SIZE	(12 * 1024 * 1024 - 1)
35 #define IN_MSG_SIZE	(12 * 1024 * 1024 - 1)
/* Keep default BAR size as 4K */
/* AORAM would be mapped by default */
38 #define INBOUND_ADDR_MASK	(SPEAR13XX_SYSRAM1_SIZE - 1)
39 
40 #define INT_TYPE_NO_INT	0
41 #define INT_TYPE_INTX	1
42 #define INT_TYPE_MSI	2
/* Per-controller runtime state for the SPEAr13xx PCIe gadget (endpoint). */
struct spear_pcie_gadget_config {
	void __iomem *base;		/* bus start of the DBI resource, stored as a cookie (see probe) */
	void __iomem *va_app_base;	/* mapped application/glue registers (struct pcie_app_reg) */
	void __iomem *va_dbi_base;	/* mapped DBI (PCI config space) registers */
	char int_type[10];		/* interrupt mode selected via configfs: "INTA" or "MSI" */
	ulong requested_msi;		/* MSI vector count requested through no_of_msi */
	ulong configured_msi;		/* MSI vector count last reported by show_no_of_msi */
	ulong bar0_size;		/* current BAR0 aperture size in bytes */
	ulong bar0_rw_offset;		/* word offset used by the bar0_data accessors */
	void __iomem *va_bar0_address;	/* kernel mapping backing BAR0 */
};
54 
/* configfs subsystem wrapper: one gadget config per registered subsystem. */
struct pcie_gadget_target {
	struct configfs_subsystem subsys;	/* must stay first for container_of in to_target() */
	struct spear_pcie_gadget_config config;
};
59 
/* configfs attribute with per-attribute show/store callbacks. */
struct pcie_gadget_target_attr {
	struct configfs_attribute	attr;
	/* show: format attribute value into buf, return byte count */
	ssize_t		(*show)(struct spear_pcie_gadget_config *config,
						char *buf);
	/* store: parse buf (count bytes), return count or -errno */
	ssize_t		(*store)(struct spear_pcie_gadget_config *config,
						 const char *buf,
						 size_t count);
};
68 
/* Route AXI read and write transactions to the DBI (config) space. */
static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	u32 armisc = readl(&app_reg->slv_armisc);
	u32 awmisc = readl(&app_reg->slv_awmisc);

	writel(armisc | (1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_armisc);
	writel(awmisc | (1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_awmisc);
}
78 
/* Stop routing AXI read and write transactions to the DBI space. */
static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	u32 armisc = readl(&app_reg->slv_armisc);
	u32 awmisc = readl(&app_reg->slv_awmisc);

	writel(armisc & ~(1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_armisc);
	writel(awmisc & ~(1 << AXI_OP_DBI_ACCESS_ID), &app_reg->slv_awmisc);
}
88 
/*
 * Read @size bytes (1, 2 or 4) at config-space offset @where into *@val.
 * The DBI window is enabled for the duration of the access.
 */
static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 *val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong addr;

	enable_dbi_access(app_reg);

	/* always read the aligned 32-bit word, then extract the bytes */
	addr = (ulong)config->va_dbi_base + (where & ~0x3);
	*val = readl(addr);

	switch (size) {
	case 1:
		*val = (*val >> (8 * (where & 3))) & 0xff;
		break;
	case 2:
		*val = (*val >> (8 * (where & 3))) & 0xffff;
		break;
	default:
		break;
	}

	disable_dbi_access(app_reg);
}
110 
/*
 * Write @size bytes (1, 2 or 4) of @val at config-space offset @where,
 * using an access width that matches @size. The DBI window is enabled
 * for the duration of the access.
 */
static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong addr;

	enable_dbi_access(app_reg);

	addr = (ulong)config->va_dbi_base + (where & ~0x3);

	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		writew(val, addr + (where & 2));
		break;
	case 1:
		writeb(val, addr + (where & 3));
		break;
	default:
		break;
	}

	disable_dbi_access(app_reg);
}
132 
133 #define PCI_FIND_CAP_TTL	48
134 
/*
 * Walk our own capability list starting at the pointer stored at @pos,
 * looking for capability id @cap. *@ttl bounds the walk to guard against
 * malformed (cyclic) lists. Returns the capability offset, or 0.
 */
static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
		u32 pos, int cap, int *ttl)
{
	u32 id;

	for (; (*ttl)--; pos += PCI_CAP_LIST_NEXT) {
		spear_dbi_read_reg(config, pos, 1, &pos);
		/* offsets below 0x40 are the standard header, not a cap */
		if (pos < 0x40)
			break;
		pos &= ~3;
		spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
	}
	return 0;
}
154 
/* Bounded capability search starting at the pointer stored at @pos. */
static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
			u32 pos, int cap)
{
	int remaining = PCI_FIND_CAP_TTL;

	return pci_find_own_next_cap_ttl(config, pos, cap, &remaining);
}
162 
/*
 * Return the config-space offset of the capability-list pointer for the
 * given header type, or 0 if the device advertises no capability list.
 *
 * Fix: dropped the unreachable trailing "return 0;" that followed a
 * switch in which every case (including default) already returned.
 */
static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
				u8 hdr_type)
{
	u32 status;

	spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		/* unknown header type: treat as no capability list */
		return 0;
	}
}
184 
185 /*
186  * Tell if a device supports a given PCI capability.
187  * Returns the address of the requested capability structure within the
188  * device's PCI configuration space or 0 in case the device does not
189  * support it. Possible values for @cap:
190  *
191  * %PCI_CAP_ID_PM	Power Management
192  * %PCI_CAP_ID_AGP	Accelerated Graphics Port
193  * %PCI_CAP_ID_VPD	Vital Product Data
194  * %PCI_CAP_ID_SLOTID	Slot Identification
195  * %PCI_CAP_ID_MSI	Message Signalled Interrupts
196  * %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
197  * %PCI_CAP_ID_PCIX	PCI-X
198  * %PCI_CAP_ID_EXP	PCI Express
199  */
pci_find_own_capability(struct spear_pcie_gadget_config * config,int cap)200 static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
201 		int cap)
202 {
203 	u32 pos;
204 	u32 hdr_type;
205 
206 	spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
207 
208 	pos = pci_find_own_cap_start(config, hdr_type);
209 	if (pos)
210 		pos = pci_find_own_next_cap(config, pos, cap);
211 
212 	return pos;
213 }
214 
/*
 * Interrupt handler stub: no rx-interrupt processing is performed (all rx
 * interrupts are masked in spear13xx_pcie_device_init), so just return 0.
 */
static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
{
	return 0;
}
219 
220 /*
221  * configfs interfaces show/store functions
222  */
pcie_gadget_show_link(struct spear_pcie_gadget_config * config,char * buf)223 static ssize_t pcie_gadget_show_link(
224 		struct spear_pcie_gadget_config *config,
225 		char *buf)
226 {
227 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
228 
229 	if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
230 		return sprintf(buf, "UP");
231 	else
232 		return sprintf(buf, "DOWN");
233 }
234 
pcie_gadget_store_link(struct spear_pcie_gadget_config * config,const char * buf,size_t count)235 static ssize_t pcie_gadget_store_link(
236 		struct spear_pcie_gadget_config *config,
237 		const char *buf, size_t count)
238 {
239 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
240 
241 	if (sysfs_streq(buf, "UP"))
242 		writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
243 			&app_reg->app_ctrl_0);
244 	else if (sysfs_streq(buf, "DOWN"))
245 		writel(readl(&app_reg->app_ctrl_0)
246 				& ~(1 << APP_LTSSM_ENABLE_ID),
247 				&app_reg->app_ctrl_0);
248 	else
249 		return -EINVAL;
250 	return count;
251 }
252 
pcie_gadget_show_int_type(struct spear_pcie_gadget_config * config,char * buf)253 static ssize_t pcie_gadget_show_int_type(
254 		struct spear_pcie_gadget_config *config,
255 		char *buf)
256 {
257 	return sprintf(buf, "%s", config->int_type);
258 }
259 
pcie_gadget_store_int_type(struct spear_pcie_gadget_config * config,const char * buf,size_t count)260 static ssize_t pcie_gadget_store_int_type(
261 		struct spear_pcie_gadget_config *config,
262 		const char *buf, size_t count)
263 {
264 	u32 cap, vec, flags;
265 	ulong vector;
266 
267 	if (sysfs_streq(buf, "INTA"))
268 		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
269 
270 	else if (sysfs_streq(buf, "MSI")) {
271 		vector = config->requested_msi;
272 		vec = 0;
273 		while (vector > 1) {
274 			vector /= 2;
275 			vec++;
276 		}
277 		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
278 		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
279 		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
280 		flags &= ~PCI_MSI_FLAGS_QMASK;
281 		flags |= vec << 1;
282 		spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
283 	} else
284 		return -EINVAL;
285 
286 	strcpy(config->int_type, buf);
287 
288 	return count;
289 }
290 
pcie_gadget_show_no_of_msi(struct spear_pcie_gadget_config * config,char * buf)291 static ssize_t pcie_gadget_show_no_of_msi(
292 		struct spear_pcie_gadget_config *config,
293 		char *buf)
294 {
295 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
296 	u32 cap, vec, flags;
297 	ulong vector;
298 
299 	if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
300 			!= (1 << CFG_MSI_EN_ID))
301 		vector = 0;
302 	else {
303 		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
304 		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
305 		flags &= ~PCI_MSI_FLAGS_QSIZE;
306 		vec = flags >> 4;
307 		vector = 1;
308 		while (vec--)
309 			vector *= 2;
310 	}
311 	config->configured_msi = vector;
312 
313 	return sprintf(buf, "%lu", vector);
314 }
315 
pcie_gadget_store_no_of_msi(struct spear_pcie_gadget_config * config,const char * buf,size_t count)316 static ssize_t pcie_gadget_store_no_of_msi(
317 		struct spear_pcie_gadget_config *config,
318 		const char *buf, size_t count)
319 {
320 	int ret;
321 
322 	ret = kstrtoul(buf, 0, &config->requested_msi);
323 	if (ret)
324 		return ret;
325 
326 	if (config->requested_msi > 32)
327 		config->requested_msi = 32;
328 
329 	return count;
330 }
331 
pcie_gadget_store_inta(struct spear_pcie_gadget_config * config,const char * buf,size_t count)332 static ssize_t pcie_gadget_store_inta(
333 		struct spear_pcie_gadget_config *config,
334 		const char *buf, size_t count)
335 {
336 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
337 	ulong en;
338 	int ret;
339 
340 	ret = kstrtoul(buf, 0, &en);
341 	if (ret)
342 		return ret;
343 
344 	if (en)
345 		writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
346 				&app_reg->app_ctrl_0);
347 	else
348 		writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
349 				&app_reg->app_ctrl_0);
350 
351 	return count;
352 }
353 
pcie_gadget_store_send_msi(struct spear_pcie_gadget_config * config,const char * buf,size_t count)354 static ssize_t pcie_gadget_store_send_msi(
355 		struct spear_pcie_gadget_config *config,
356 		const char *buf, size_t count)
357 {
358 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
359 	ulong vector;
360 	u32 ven_msi;
361 	int ret;
362 
363 	ret = kstrtoul(buf, 0, &vector);
364 	if (ret)
365 		return ret;
366 
367 	if (!config->configured_msi)
368 		return -EINVAL;
369 
370 	if (vector >= config->configured_msi)
371 		return -EINVAL;
372 
373 	ven_msi = readl(&app_reg->ven_msi_1);
374 	ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
375 	ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
376 	ven_msi &= ~VEN_MSI_TC_MASK;
377 	ven_msi |= 0 << VEN_MSI_TC_ID;
378 	ven_msi &= ~VEN_MSI_VECTOR_MASK;
379 	ven_msi |= vector << VEN_MSI_VECTOR_ID;
380 
381 	/* generating interrupt for msi vector */
382 	ven_msi |= VEN_MSI_REQ_EN;
383 	writel(ven_msi, &app_reg->ven_msi_1);
384 	udelay(1);
385 	ven_msi &= ~VEN_MSI_REQ_EN;
386 	writel(ven_msi, &app_reg->ven_msi_1);
387 
388 	return count;
389 }
390 
pcie_gadget_show_vendor_id(struct spear_pcie_gadget_config * config,char * buf)391 static ssize_t pcie_gadget_show_vendor_id(
392 		struct spear_pcie_gadget_config *config,
393 		char *buf)
394 {
395 	u32 id;
396 
397 	spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);
398 
399 	return sprintf(buf, "%x", id);
400 }
401 
pcie_gadget_store_vendor_id(struct spear_pcie_gadget_config * config,const char * buf,size_t count)402 static ssize_t pcie_gadget_store_vendor_id(
403 		struct spear_pcie_gadget_config *config,
404 		const char *buf, size_t count)
405 {
406 	ulong id;
407 	int ret;
408 
409 	ret = kstrtoul(buf, 0, &id);
410 	if (ret)
411 		return ret;
412 
413 	spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);
414 
415 	return count;
416 }
417 
pcie_gadget_show_device_id(struct spear_pcie_gadget_config * config,char * buf)418 static ssize_t pcie_gadget_show_device_id(
419 		struct spear_pcie_gadget_config *config,
420 		char *buf)
421 {
422 	u32 id;
423 
424 	spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);
425 
426 	return sprintf(buf, "%x", id);
427 }
428 
pcie_gadget_store_device_id(struct spear_pcie_gadget_config * config,const char * buf,size_t count)429 static ssize_t pcie_gadget_store_device_id(
430 		struct spear_pcie_gadget_config *config,
431 		const char *buf, size_t count)
432 {
433 	ulong id;
434 	int ret;
435 
436 	ret = kstrtoul(buf, 0, &id);
437 	if (ret)
438 		return ret;
439 
440 	spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);
441 
442 	return count;
443 }
444 
/* Report the currently programmed BAR0 size in hex (bytes). */
static ssize_t pcie_gadget_show_bar0_size(
		struct spear_pcie_gadget_config *config,
		char *buf)
{
	return sprintf(buf, "%lx", config->bar0_size);
}
451 
/*
 * Set the BAR0 aperture size. The value is clamped to [256, 1MB] and
 * otherwise rounded to a power of two before being programmed into the
 * BAR0 mask register.
 */
static ssize_t pcie_gadget_store_bar0_size(
		struct spear_pcie_gadget_config *config,
		const char *buf, size_t count)
{
	ulong size;
	u32 pos, pos1;
	u32 no_of_bit = 0;
	int ret;

	ret = kstrtoul(buf, 0, &size);
	if (ret)
		return ret;

	/* min bar size is 256 */
	if (size <= 0x100)
		size = 0x100;
	/* max bar size is 1MB*/
	else if (size >= 0x100000)
		size = 0x100000;
	else {
		/*
		 * Scan the low 21 bits: pos1 ends up one past the highest set
		 * bit, no_of_bit counts the set bits. With exactly two bits
		 * set, the result is rounded DOWN to the highest set bit;
		 * otherwise it is rounded UP to the next power of two.
		 * NOTE(review): this two-bit special case looks deliberate
		 * (nearest-power-of-two-ish rounding) but is undocumented —
		 * confirm against the original driver intent before changing.
		 */
		pos = 0;
		pos1 = 0;
		while (pos < 21) {
			pos = find_next_bit((ulong *)&size, 21, pos);
			if (pos != 21)
				pos1 = pos + 1;
			pos++;
			no_of_bit++;
		}
		if (no_of_bit == 2)
			pos1--;

		size = 1 << pos1;
	}
	config->bar0_size = size;
	/* mask register holds size - 1 (the low-address match mask) */
	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);

	return count;
}
491 
pcie_gadget_show_bar0_address(struct spear_pcie_gadget_config * config,char * buf)492 static ssize_t pcie_gadget_show_bar0_address(
493 		struct spear_pcie_gadget_config *config,
494 		char *buf)
495 {
496 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
497 
498 	u32 address = readl(&app_reg->pim0_mem_addr_start);
499 
500 	return sprintf(buf, "%x", address);
501 }
502 
/*
 * Point BAR0 at a new system address: remap the kernel-side backing
 * mapping and program the inbound translation register. The address is
 * aligned down to the current BAR0 size.
 */
static ssize_t pcie_gadget_store_bar0_address(
		struct spear_pcie_gadget_config *config,
		const char *buf, size_t count)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong address;
	int ret;

	ret = kstrtoul(buf, 0, &address);
	if (ret)
		return ret;

	/* BAR0-sized alignment: drop the low bits */
	address &= ~(config->bar0_size - 1);
	/* replace any previous backing mapping with one at the new address */
	if (config->va_bar0_address)
		iounmap(config->va_bar0_address);
	config->va_bar0_address = ioremap(address, config->bar0_size);
	/* on failure the old mapping is already gone; bar0_data will -ENOMEM */
	if (!config->va_bar0_address)
		return -ENOMEM;

	writel(address, &app_reg->pim0_mem_addr_start);

	return count;
}
526 
/* Report the offset used by the bar0_data accessors (hex). */
static ssize_t pcie_gadget_show_bar0_rw_offset(
		struct spear_pcie_gadget_config *config,
		char *buf)
{
	return sprintf(buf, "%lx", config->bar0_rw_offset);
}
533 
pcie_gadget_store_bar0_rw_offset(struct spear_pcie_gadget_config * config,const char * buf,size_t count)534 static ssize_t pcie_gadget_store_bar0_rw_offset(
535 		struct spear_pcie_gadget_config *config,
536 		const char *buf, size_t count)
537 {
538 	ulong offset;
539 	int ret;
540 
541 	ret = kstrtoul(buf, 0, &offset);
542 	if (ret)
543 		return ret;
544 
545 	if (offset % 4)
546 		return -EINVAL;
547 
548 	config->bar0_rw_offset = offset;
549 
550 	return count;
551 }
552 
/*
 * Read the 32-bit word at bar0_rw_offset inside the BAR0 backing mapping.
 * Fails with -ENOMEM until bar0_address has established a mapping.
 */
static ssize_t pcie_gadget_show_bar0_data(
		struct spear_pcie_gadget_config *config,
		char *buf)
{
	ulong data;

	if (!config->va_bar0_address)
		return -ENOMEM;

	data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);

	return sprintf(buf, "%lx", data);
}
566 
pcie_gadget_store_bar0_data(struct spear_pcie_gadget_config * config,const char * buf,size_t count)567 static ssize_t pcie_gadget_store_bar0_data(
568 		struct spear_pcie_gadget_config *config,
569 		const char *buf, size_t count)
570 {
571 	ulong data;
572 	int ret;
573 
574 	ret = kstrtoul(buf, 0, &data);
575 	if (ret)
576 		return ret;
577 
578 	if (!config->va_bar0_address)
579 		return -ENOMEM;
580 
581 	writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
582 
583 	return count;
584 }
585 
/*
 * Attribute definitions.
 */

/* Read-only configfs attribute: show handler only. */
#define PCIE_GADGET_TARGET_ATTR_RO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)

/* Write-only configfs attribute: store handler only. */
#define PCIE_GADGET_TARGET_ATTR_WO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)

/* Read-write configfs attribute: both show and store handlers. */
#define PCIE_GADGET_TARGET_ATTR_RW(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
			pcie_gadget_store_##_name)
PCIE_GADGET_TARGET_ATTR_RW(link);
PCIE_GADGET_TARGET_ATTR_RW(int_type);
PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
PCIE_GADGET_TARGET_ATTR_WO(inta);
PCIE_GADGET_TARGET_ATTR_WO(send_msi);
PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
PCIE_GADGET_TARGET_ATTR_RW(device_id);
PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
PCIE_GADGET_TARGET_ATTR_RW(bar0_data);

/* NULL-terminated list handed to the configfs item type below. */
static struct configfs_attribute *pcie_gadget_target_attrs[] = {
	&pcie_gadget_target_link.attr,
	&pcie_gadget_target_int_type.attr,
	&pcie_gadget_target_no_of_msi.attr,
	&pcie_gadget_target_inta.attr,
	&pcie_gadget_target_send_msi.attr,
	&pcie_gadget_target_vendor_id.attr,
	&pcie_gadget_target_device_id.attr,
	&pcie_gadget_target_bar0_size.attr,
	&pcie_gadget_target_bar0_address.attr,
	&pcie_gadget_target_bar0_rw_offset.attr,
	&pcie_gadget_target_bar0_data.attr,
	NULL,
};
628 
to_target(struct config_item * item)629 static struct pcie_gadget_target *to_target(struct config_item *item)
630 {
631 	return item ?
632 		container_of(to_configfs_subsystem(to_config_group(item)),
633 				struct pcie_gadget_target, subsys) : NULL;
634 }
635 
636 /*
637  * Item operations and type for pcie_gadget_target.
638  */
639 
pcie_gadget_target_attr_show(struct config_item * item,struct configfs_attribute * attr,char * buf)640 static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
641 					   struct configfs_attribute *attr,
642 					   char *buf)
643 {
644 	ssize_t ret = -EINVAL;
645 	struct pcie_gadget_target *target = to_target(item);
646 	struct pcie_gadget_target_attr *t_attr =
647 		container_of(attr, struct pcie_gadget_target_attr, attr);
648 
649 	if (t_attr->show)
650 		ret = t_attr->show(&target->config, buf);
651 	return ret;
652 }
653 
pcie_gadget_target_attr_store(struct config_item * item,struct configfs_attribute * attr,const char * buf,size_t count)654 static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
655 					struct configfs_attribute *attr,
656 					const char *buf,
657 					size_t count)
658 {
659 	ssize_t ret = -EINVAL;
660 	struct pcie_gadget_target *target = to_target(item);
661 	struct pcie_gadget_target_attr *t_attr =
662 		container_of(attr, struct pcie_gadget_target_attr, attr);
663 
664 	if (t_attr->store)
665 		ret = t_attr->store(&target->config, buf, count);
666 	return ret;
667 }
668 
/* configfs item operations: route show/store to the tables above. */
static struct configfs_item_operations pcie_gadget_target_item_ops = {
	.show_attribute		= pcie_gadget_target_attr_show,
	.store_attribute	= pcie_gadget_target_attr_store,
};

/* Item type registered for the gadget's configfs subsystem. */
static struct config_item_type pcie_gadget_target_type = {
	.ct_attrs		= pcie_gadget_target_attrs,
	.ct_item_ops		= &pcie_gadget_target_item_ops,
	.ct_owner		= THIS_MODULE,
};
679 
/*
 * One-time controller initialisation: program the outbound and inbound
 * address translation windows and switch the core into endpoint mode.
 */
static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;

	/*setup registers for outbound translation */

	/*
	 * Outbound windows are laid out back to back starting at the DBI
	 * resource base: IN0 mem, IN1 mem, IO, CFG0, CFG1, MSG.
	 * NOTE(review): the in*_addr_* fields are read back directly from
	 * the __iomem struct rather than via readl() — confirm this is
	 * intentional for these glue registers.
	 */
	writel(config->base, &app_reg->in0_mem_addr_start);
	writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
			&app_reg->in0_mem_addr_limit);
	writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
	writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
			&app_reg->in1_mem_addr_limit);
	writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
	writel(app_reg->in_io_addr_start + IN_IO_SIZE,
			&app_reg->in_io_addr_limit);
	writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
	writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
			&app_reg->in_cfg0_addr_limit);
	writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
	writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
			&app_reg->in_cfg1_addr_limit);
	writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
	writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
			&app_reg->in_msg_addr_limit);

	writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
	writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
	writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);

	/*setup registers for inbound translation */

	/* Keep AORAM mapped at BAR0 as default */
	config->bar0_size = INBOUND_ADDR_MASK + 1;
	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
	spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
	/* NOTE(review): ioremap() result is not checked for NULL here;
	 * the bar0_data accessors do check before use. */
	config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
			config->bar0_size);

	writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
	writel(0, &app_reg->pim1_mem_addr_start);
	writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);

	writel(0x0, &app_reg->pim_io_addr_start);
	/* NOTE(review): duplicate write to pim_io_addr_start — the second
	 * store may have been intended for a different register; confirm
	 * against the SPEAr13xx register documentation. */
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_rom_addr_start);

	/* endpoint mode + misc control + register translation enable */
	writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
			| ((u32)1 << REG_TRANSLATION_ENABLE),
			&app_reg->app_ctrl_0);
	/* disable all rx interrupts */
	writel(0, &app_reg->int_mask);

	/* Select INTA as default*/
	spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
}
735 
spear_pcie_gadget_probe(struct platform_device * pdev)736 static int spear_pcie_gadget_probe(struct platform_device *pdev)
737 {
738 	struct resource *res0, *res1;
739 	unsigned int status = 0;
740 	int irq;
741 	struct clk *clk;
742 	static struct pcie_gadget_target *target;
743 	struct spear_pcie_gadget_config *config;
744 	struct config_item		*cg_item;
745 	struct configfs_subsystem *subsys;
746 
747 	target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL);
748 	if (!target) {
749 		dev_err(&pdev->dev, "out of memory\n");
750 		return -ENOMEM;
751 	}
752 
753 	cg_item = &target->subsys.su_group.cg_item;
754 	sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
755 	cg_item->ci_type	= &pcie_gadget_target_type;
756 	config = &target->config;
757 
758 	/* get resource for application registers*/
759 	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
760 	config->va_app_base = devm_ioremap_resource(&pdev->dev, res0);
761 	if (IS_ERR(config->va_app_base)) {
762 		dev_err(&pdev->dev, "ioremap fail\n");
763 		return PTR_ERR(config->va_app_base);
764 	}
765 
766 	/* get resource for dbi registers*/
767 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
768 	config->base = (void __iomem *)res1->start;
769 
770 	config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1);
771 	if (IS_ERR(config->va_dbi_base)) {
772 		dev_err(&pdev->dev, "ioremap fail\n");
773 		return PTR_ERR(config->va_dbi_base);
774 	}
775 
776 	platform_set_drvdata(pdev, target);
777 
778 	irq = platform_get_irq(pdev, 0);
779 	if (irq < 0) {
780 		dev_err(&pdev->dev, "no update irq?\n");
781 		return irq;
782 	}
783 
784 	status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq,
785 				  0, pdev->name, NULL);
786 	if (status) {
787 		dev_err(&pdev->dev,
788 			"pcie gadget interrupt IRQ%d already claimed\n", irq);
789 		return status;
790 	}
791 
792 	/* Register configfs hooks */
793 	subsys = &target->subsys;
794 	config_group_init(&subsys->su_group);
795 	mutex_init(&subsys->su_mutex);
796 	status = configfs_register_subsystem(subsys);
797 	if (status)
798 		return status;
799 
800 	/*
801 	 * init basic pcie application registers
802 	 * do not enable clock if it is PCIE0.Ideally , all controller should
803 	 * have been independent from others with respect to clock. But PCIE1
804 	 * and 2 depends on PCIE0.So PCIE0 clk is provided during board init.
805 	 */
806 	if (pdev->id == 1) {
807 		/*
808 		 * Ideally CFG Clock should have been also enabled here. But
809 		 * it is done currently during board init routne
810 		 */
811 		clk = clk_get_sys("pcie1", NULL);
812 		if (IS_ERR(clk)) {
813 			pr_err("%s:couldn't get clk for pcie1\n", __func__);
814 			return PTR_ERR(clk);
815 		}
816 		status = clk_enable(clk);
817 		if (status) {
818 			pr_err("%s:couldn't enable clk for pcie1\n", __func__);
819 			return status;
820 		}
821 	} else if (pdev->id == 2) {
822 		/*
823 		 * Ideally CFG Clock should have been also enabled here. But
824 		 * it is done currently during board init routne
825 		 */
826 		clk = clk_get_sys("pcie2", NULL);
827 		if (IS_ERR(clk)) {
828 			pr_err("%s:couldn't get clk for pcie2\n", __func__);
829 			return PTR_ERR(clk);
830 		}
831 		status = clk_enable(clk);
832 		if (status) {
833 			pr_err("%s:couldn't enable clk for pcie2\n", __func__);
834 			return status;
835 		}
836 	}
837 	spear13xx_pcie_device_init(config);
838 
839 	return 0;
840 }
841 
spear_pcie_gadget_remove(struct platform_device * pdev)842 static int spear_pcie_gadget_remove(struct platform_device *pdev)
843 {
844 	static struct pcie_gadget_target *target;
845 
846 	target = platform_get_drvdata(pdev);
847 
848 	configfs_unregister_subsystem(&target->subsys);
849 
850 	return 0;
851 }
852 
/* Shutdown hook: intentionally empty — nothing to quiesce on power-off. */
static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
{
}
856 
/* Platform driver glue; matched by name against the board's device. */
static struct platform_driver spear_pcie_gadget_driver = {
	.probe = spear_pcie_gadget_probe,
	.remove = spear_pcie_gadget_remove,
	.shutdown = spear_pcie_gadget_shutdown,
	.driver = {
		.name = "pcie-gadget-spear",
		.bus = &platform_bus_type
	},
};
866 
867 module_platform_driver(spear_pcie_gadget_driver);
868 
869 MODULE_ALIAS("platform:pcie-gadget-spear");
870 MODULE_AUTHOR("Pratyush Anand");
871 MODULE_LICENSE("GPL");
872