/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);

static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
	.name		= "bcma",
	.match		= bcma_bus_match,
	.probe		= bcma_device_probe,
	.remove		= bcma_device_remove,
	.uevent		= bcma_device_uevent,
	.dev_groups	= bcma_device_groups,
};

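/* The BCM4706 exposes its ChipCommon core under a dedicated core ID. */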
static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);

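/*
 * Poll a core register until the bits selected by @mask read back as @value
 * or @timeout jiffies have passed. Returns true on success, false (with a
 * warning) on timeout.
 */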
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}

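/*
 * With device tree support, a core may have a matching child node below the
 * host platform device. The child is identified by translating its first
 * "reg" entry and comparing the result with the core's MMIO base address.
 */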
#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent || !parent->dev.of_node)
		return NULL;

	for_each_child_of_node(parent->dev.of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;
}
#else
static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
}
#endif /* CONFIG_OF */

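/*
 * Hook a discovered core up to the driver core: name it "bcma<bus>:<index>",
 * pick parent, DMA device and IRQ according to the host type, and register
 * it on the bcma bus so a matching client driver can bind.
 */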
static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dev.parent = &bus->host_pci->dev;
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		core->dev.dma_mask = &core->dev.coherent_dma_mask;
		if (bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
			core->dev.parent = &bus->host_pdev->dev;
			bcma_of_fill_device(bus->host_pdev, core);
		} else {
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}

static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We handle these cores ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_DRIVER_MIPS
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif
	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

static void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		if (core->dev_registered)
			device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);
}

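/*
 * Bring up a scanned bus: enumerate the cores, early-init ChipCommon,
 * register the cores that provide flash access, read the SPROM, init the
 * ChipCommon/MIPS/PCIe/GMAC-common drivers and finally register the
 * remaining cores so client drivers can bind to them.
 */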
int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init first PCIe core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init second PCIe core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

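/*
 * The MIPS, PCIe and GMAC common cores are driven by bcma itself and never
 * get a struct device registered, so bcma_unregister_cores() does not free
 * them; look them up before the core list is torn down and kfree() them
 * afterwards.
 */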
void bcma_bus_unregister(struct bcma_bus *bus)
{
	struct bcma_device *cores[3];
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
	cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);

	bcma_unregister_cores(bus);

	kfree(cores[2]);
	kfree(cores[1]);
	kfree(cores[0]);
}

int __init bcma_bus_early_register(struct bcma_bus *bus,
				   struct bcma_device *core_cc,
				   struct bcma_device *core_mips)
{
	int err;
	struct bcma_device *core;
	struct bcma_device_id match;

	match.manuf = BCMA_MANUF_BCM;
	match.id = bcma_cc_core_id(bus);
	match.class = BCMA_CL_SIM;
	match.rev = BCMA_ANY_REV;

	/* Scan for chip common core */
	err = bcma_bus_scan_early(bus, &match, core_cc);
	if (err) {
		bcma_err(bus, "Failed to scan for common core: %d\n", err);
		return -1;
	}

	match.manuf = BCMA_MANUF_MIPS;
	match.id = BCMA_CORE_MIPS_74K;
	match.class = BCMA_CL_SIM;
	match.rev = BCMA_ANY_REV;

	/* Scan for mips core */
	err = bcma_bus_scan_early(bus, &match, core_mips);
	if (err) {
		bcma_err(bus, "Failed to scan for mips core: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

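/*
 * Bus-level PM helpers: suspend calls every bound driver's ->suspend hook;
 * resume re-runs the ChipCommon setup before calling the drivers' ->resume
 * hooks.
 */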
#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);

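/*
 * A client driver binds through the bcma_driver_register() wrapper from
 * <linux/bcma/bcma.h>. Roughly (a sketch only; the "foo" names are
 * placeholders):
 *
 *	static const struct bcma_device_id foo_bcma_tbl[] = {
 *		BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211,
 *			  BCMA_ANY_REV, BCMA_ANY_CLASS),
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(bcma, foo_bcma_tbl);
 *
 *	static struct bcma_driver foo_driver = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= foo_bcma_tbl,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 * bcma_driver_register(&foo_driver) is then called from the module's init
 * path and bcma_driver_unregister(&foo_driver) from its exit path.
 */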
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
	    if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		(did->id == cid->id || did->id == BCMA_ANY_ID) &&
		(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		(did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
					       drv);
	int err = 0;

	if (adrv->probe)
		err = adrv->probe(core);

	return err;
}

static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
					       drv);

	if (adrv->remove)
		adrv->remove(core);

	return 0;
}

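/*
 * The MODALIAS string has to stay in sync with the "bcma:" alias format
 * generated from MODULE_DEVICE_TABLE(bcma, ...), so that userspace can load
 * the right client driver on hotplug.
 */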
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

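/*
 * Register the bus type and the host backends. A failing host backend is
 * only logged; it must not take the whole bus subsystem down with it.
 * This runs at fs_initcall time, i.e. before regular device initcalls.
 */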
static int __init bcma_modinit(void)
{
	int err;

	err = bus_register(&bcma_bus_type);
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
fs_initcall(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)