• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Linux DHD Bus Module for PCIE
4  *
5  * Copyright (C) 1999-2019, Broadcom.
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: dhd_pcie_linux.c 821650 2019-05-24 10:41:54Z $
29  */
30 
31 /* include files */
32 #include <typedefs.h>
33 #include <bcmutils.h>
34 #include <bcmdevs.h>
35 #include <siutils.h>
36 #include <hndsoc.h>
37 #include <hndpmu.h>
38 #include <sbchipc.h>
39 #if defined(DHD_DEBUG)
40 #include <hnd_armtrap.h>
41 #include <hnd_cons.h>
42 #endif /* defined(DHD_DEBUG) */
43 #include <dngl_stats.h>
44 #include <pcie_core.h>
45 #include <dhd.h>
46 #include <dhd_bus.h>
47 #include <dhd_proto.h>
48 #include <dhd_dbg.h>
49 #include <dhdioctl.h>
50 #include <bcmmsgbuf.h>
51 #include <pcicfg.h>
52 #include <dhd_pcie.h>
53 #include <dhd_linux.h>
54 #ifdef CONFIG_ARCH_MSM
55 #if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
56 #include <linux/msm_pcie.h>
57 #else
58 #include <mach/msm_pcie.h>
59 #endif /* CONFIG_PCI_MSM */
60 #endif /* CONFIG_ARCH_MSM */
61 
62 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
63 #include <linux/pm_runtime.h>
64 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
65 
66 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
67 #ifndef AUTO_SUSPEND_TIMEOUT
68 #define AUTO_SUSPEND_TIMEOUT 1000
69 #endif /* AUTO_SUSPEND_TIMEOUT */
70 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
71 
72 #include <linux/irq.h>
73 #ifdef USE_SMMU_ARCH_MSM
74 #include <asm/dma-iommu.h>
75 #include <linux/iommu.h>
76 #include <linux/of.h>
77 #include <linux/platform_device.h>
78 #endif /* USE_SMMU_ARCH_MSM */
79 #include <dhd_config.h>
80 
81 #define PCI_CFG_RETRY 		10
82 #define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
83 #define BCM_MEM_FILENAME_LEN 	24		/* Mem. filename length */
84 
85 #ifdef FORCE_TPOWERON
86 extern uint32 tpoweron_scale;
87 #endif /* FORCE_TPOWERON */
88 /* user defined data structures  */
89 
90 typedef bool (*dhdpcie_cb_fn_t)(void *);
91 
/* Per-PCI-device driver state; stored as pci drvdata for the probed device. */
typedef struct dhdpcie_info
{
	dhd_bus_t	*bus;		/* back-pointer to the DHD bus instance */
	osl_t		*osh;		/* OS abstraction handle */
	struct pci_dev  *dev;		/* pci device handle */
	volatile char	*regs;		/* pci device memory va */
	volatile char	*tcm;		/* pci device memory va */
	uint32		bar1_size;	/* pci device memory size */
	uint32		curr_bar1_win;	/* current PCIEBar1Window setting */
	struct pcos_info *pcos_info;
	uint16		last_intrstatus;	/* to cache intrstatus */
	int	irq;			/* host interrupt line for this device */
	char pciname[32];		/* printable device name */
	struct pci_saved_state* default_state;	/* saved PCI config-space snapshot */
	struct pci_saved_state* state;		/* config space stashed at suspend,
						 * re-applied in dhdpcie_resume_dev()
						 */
#ifdef BCMPCIE_OOB_HOST_WAKE
	void *os_cxt;			/* Pointer to per-OS private data */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_WAKE_STATUS
	spinlock_t	pcie_lock;	/* protects the wake counters below */
	unsigned int	total_wake_count;	/* cumulative wake events (bcmpcie_set_get_wake) */
	int		pkt_wake;	/* last wake flag exchanged via bcmpcie_set_get_wake() */
	int		wake_irq;	/* IRQ used for host wake */
#endif /* DHD_WAKE_STATUS */
#ifdef USE_SMMU_ARCH_MSM
	void *smmu_cxt;			/* dhdpcie_smmu_info_t: MSM SMMU mapping state */
#endif /* USE_SMMU_ARCH_MSM */
} dhdpcie_info_t;
120 
/* Per-device OS-side tuning state (timer + tasklet + interrupt waiters). */
struct pcos_info {
	dhdpcie_info_t *pc;		/* owning device info */
	spinlock_t lock;
	wait_queue_head_t intr_wait_queue;	/* waiters for interrupt events */
	struct timer_list tuning_timer;
	int tuning_timer_exp;		/* timer expiry value (units not visible in this file) */
	atomic_t timer_enab;		/* non-zero while the tuning timer is armed */
	struct tasklet_struct tuning_tasklet;
};
130 
#ifdef BCMPCIE_OOB_HOST_WAKE
/* State for the out-of-band host-wake interrupt line. */
typedef struct dhdpcie_os_info {
	int			oob_irq_num;	/* valid when hardware or software oob in use */
	unsigned long		oob_irq_flags;	/* valid when hardware or software oob in use */
	bool			oob_irq_registered;	/* IRQ registration state */
	bool			oob_irq_enabled;	/* IRQ currently enabled */
	bool			oob_irq_wake_enabled;	/* IRQ armed as a wake source */
	spinlock_t		oob_irq_spinlock;	/* protects the state above */
	void			*dev;		/* handle to the underlying device */
} dhdpcie_os_info_t;
/* OOB wake interrupt handler (defined elsewhere in this file) */
static irqreturn_t wlan_oob_irq(int irq, void *data);
#ifdef CUSTOMER_HW2
extern struct brcm_pcie_wake brcm_pcie_wake;
#endif /* CUSTOMER_HW2 */
#endif /* BCMPCIE_OOB_HOST_WAKE */
146 
#ifdef USE_SMMU_ARCH_MSM
/* MSM SMMU (IOMMU) mapping context for the PCIe endpoint. */
typedef struct dhdpcie_smmu_info {
	struct dma_iommu_mapping *smmu_mapping;	/* live mapping; NULL when detached */
	dma_addr_t smmu_iova_start;	/* IOVA window start (read from device tree) */
	size_t smmu_iova_len;		/* IOVA window length (read from device tree) */
} dhdpcie_smmu_info_t;
#endif /* USE_SMMU_ARCH_MSM */
154 
/* function declarations */
static int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit
dhdpcie_pci_remove(struct pci_dev *pdev);
static int dhdpcie_init(struct pci_dev *pdev);
static irqreturn_t dhdpcie_isr(int irq, void *arg);
/* OS Routine functions for PCI suspend/resume */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
/* NOTE(review): this declaration takes a struct pci_dev *, but the
 * definition later in this file takes a dhd_bus_t * — confirm which
 * signature is intended under DHD_PCIE_NATIVE_RUNTIMEPM.
 */
static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint);
#else
static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
static int dhdpcie_resume_dev(struct pci_dev *dev);
static int dhdpcie_suspend_dev(struct pci_dev *dev);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
static int dhdpcie_pm_system_resume_noirq(struct device * dev);
#else
static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
static int dhdpcie_pci_resume(struct pci_dev *dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int dhdpcie_pm_runtime_suspend(struct device * dev);
static int dhdpcie_pm_runtime_resume(struct device * dev);
/* NOTE(review): the two noirq prototypes below duplicate the ones
 * declared a few lines above under the same #ifdef.
 */
static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
static int dhdpcie_pm_system_resume_noirq(struct device * dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);

/* Generic PCI(e) capability register accessor: reads or writes 'offset'
 * within capability 'cap' (extended capability when is_ext is TRUE).
 */
uint32
dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval);
194 static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
195 	{ vendor: 0x14e4,
196 	device: PCI_ANY_ID,
197 	subvendor: PCI_ANY_ID,
198 	subdevice: PCI_ANY_ID,
199 	class: PCI_CLASS_NETWORK_OTHER << 8,
200 	class_mask: 0xffff00,
201 	driver_data: 0,
202 	},
203 	{ 0, 0, 0, 0, 0, 0, 0}
204 };
205 MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
206 
/* Power Management Hooks */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
/* Runtime-PM plus system-sleep (noirq phase) callbacks for the device. */
static const struct dev_pm_ops dhdpcie_pm_ops = {
	SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL)
	.suspend_noirq = dhdpcie_pm_system_suspend_noirq,
	.resume_noirq = dhdpcie_pm_system_resume_noirq
};
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
215 
216 static struct pci_driver dhdpcie_driver = {
217 	node:		{&dhdpcie_driver.node, &dhdpcie_driver.node},
218 	name:		"pcieh",
219 	id_table:	dhdpcie_pci_devid,
220 	probe:		dhdpcie_pci_probe,
221 	remove:		dhdpcie_pci_remove,
222 #if defined(DHD_PCIE_NATIVE_RUNTIMEPM)
223 	.driver.pm = &dhd_pcie_pm_ops,
224 #else
225 	suspend:	dhdpcie_pci_suspend,
226 	resume:		dhdpcie_pci_resume,
227 #endif // endif
228 };
229 
/* Non-zero once PCIe driver init has completed successfully — presumably
 * set by dhdpcie_init()/module init, which is not visible in this chunk;
 * verify against the rest of the file.
 */
int dhdpcie_init_succeeded = FALSE;
231 
232 #ifdef USE_SMMU_ARCH_MSM
dhdpcie_smmu_init(struct pci_dev * pdev,void * smmu_cxt)233 static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
234 {
235 	struct dma_iommu_mapping *mapping;
236 	struct device_node *root_node = NULL;
237 	dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
238 	int smmu_iova_address[2];
239 	char *wlan_node = "android,bcmdhd_wlan";
240 	char *wlan_smmu_node = "wlan-smmu-iova-address";
241 	int atomic_ctx = 1;
242 	int s1_bypass = 1;
243 	int ret = 0;
244 
245 	DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));
246 
247 	root_node = of_find_compatible_node(NULL, NULL, wlan_node);
248 	if (!root_node) {
249 		WARN(1, "failed to get device node of BRCM WLAN\n");
250 		return -ENODEV;
251 	}
252 
253 	if (of_property_read_u32_array(root_node, wlan_smmu_node,
254 		smmu_iova_address, 2) == 0) {
255 		DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
256 			__FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
257 		smmu_info->smmu_iova_start = smmu_iova_address[0];
258 		smmu_info->smmu_iova_len = smmu_iova_address[1];
259 	} else {
260 		printf("%s : can't get smmu iova address property\n",
261 			__FUNCTION__);
262 		return -ENODEV;
263 	}
264 
265 	if (smmu_info->smmu_iova_len <= 0) {
266 		DHD_ERROR(("%s: Invalid smmu iova len %d\n",
267 			__FUNCTION__, (int)smmu_info->smmu_iova_len));
268 		return -EINVAL;
269 	}
270 
271 	DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
272 
273 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
274 		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
275 		DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
276 		return -EINVAL;
277 	}
278 
279 	mapping = arm_iommu_create_mapping(&platform_bus_type,
280 		smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
281 	if (IS_ERR(mapping)) {
282 		DHD_ERROR(("%s: create mapping failed, err = %d\n",
283 			__FUNCTION__, ret));
284 		ret = PTR_ERR(mapping);
285 		goto map_fail;
286 	}
287 
288 	ret = iommu_domain_set_attr(mapping->domain,
289 		DOMAIN_ATTR_ATOMIC, &atomic_ctx);
290 	if (ret) {
291 		DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
292 			__FUNCTION__, ret));
293 		goto set_attr_fail;
294 	}
295 
296 	ret = iommu_domain_set_attr(mapping->domain,
297 		DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
298 	if (ret < 0) {
299 		DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
300 			__FUNCTION__, ret));
301 		goto set_attr_fail;
302 	}
303 
304 	ret = arm_iommu_attach_device(&pdev->dev, mapping);
305 	if (ret) {
306 		DHD_ERROR(("%s: attach device failed, err = %d\n",
307 			__FUNCTION__, ret));
308 		goto attach_fail;
309 	}
310 
311 	smmu_info->smmu_mapping = mapping;
312 
313 	return ret;
314 
315 attach_fail:
316 set_attr_fail:
317 	arm_iommu_release_mapping(mapping);
318 map_fail:
319 	return ret;
320 }
321 
dhdpcie_smmu_remove(struct pci_dev * pdev,void * smmu_cxt)322 static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
323 {
324 	dhdpcie_smmu_info_t *smmu_info;
325 
326 	if (!smmu_cxt) {
327 		return;
328 	}
329 
330 	smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
331 	if (smmu_info->smmu_mapping) {
332 		arm_iommu_detach_device(&pdev->dev);
333 		arm_iommu_release_mapping(smmu_info->smmu_mapping);
334 		smmu_info->smmu_mapping = NULL;
335 	}
336 }
337 #endif /* USE_SMMU_ARCH_MSM */
338 
339 #ifdef FORCE_TPOWERON
340 static void
dhd_bus_get_tpoweron(dhd_bus_t * bus)341 dhd_bus_get_tpoweron(dhd_bus_t *bus)
342 {
343 
344 	uint32 tpoweron_rc;
345 	uint32 tpoweron_ep;
346 
347 	tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
348 		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
349 	tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
350 		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
351 	DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n",
352 		__FUNCTION__, tpoweron_rc, tpoweron_ep));
353 }
354 
355 static void
dhd_bus_set_tpoweron(dhd_bus_t * bus,uint16 tpoweron)356 dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
357 {
358 
359 	dhd_bus_get_tpoweron(bus);
360 	/* Set the tpoweron */
361 	DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
362 	dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
363 		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
364 	dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
365 		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
366 
367 	dhd_bus_get_tpoweron(bus);
368 
369 }
370 
371 static bool
dhdpcie_chip_req_forced_tpoweron(dhd_bus_t * bus)372 dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
373 {
374 	/*
375 	 * On Fire's reference platform, coming out of L1.2,
376 	 * there is a constant delay of 45us between CLKREQ# and stable REFCLK
377 	 * Due to this delay, with tPowerOn < 50
378 	 * there is a chance of the refclk sense to trigger on noise.
379 	 *
380 	 * Which ever chip needs forced tPowerOn of 50us should be listed below.
381 	 */
382 	if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
383 		return TRUE;
384 	}
385 	return FALSE;
386 }
387 #endif /* FORCE_TPOWERON */
388 
/*
 * Enable or disable ASPM in the PCIe LinkControl register of one device.
 * When enabling, only L1 is turned on; when disabling, both L0s and L1
 * are cleared.  Returns TRUE if the register was actually changed,
 * FALSE if it was already in the requested state.
 */
static bool
dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
{
	uint32 linkctrl_before;
	uint32 linkctrl_after = 0;
	uint8 linkctrl_asm;
	char *device;

	/* Tag log lines with which end of the link this is */
	device = (dev == bus->dev) ? "EP" : "RC";

	linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
		FALSE, FALSE, 0);
	linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK);

	if (enable) {
		if (linkctrl_asm == PCIE_ASPM_L1_ENAB) {
			DHD_ERROR(("%s: %s already enabled  linkctrl: 0x%x\n",
				__FUNCTION__, device, linkctrl_before));
			return FALSE;
		}
		/* Enable only L1 ASPM (bit 1) */
		dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
			TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB));
	} else {
		if (linkctrl_asm == 0) {
			DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n",
				__FUNCTION__, device, linkctrl_before));
			return FALSE;
		}
		/* Disable complete ASPM (bit 1 and bit 0) */
		dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
			TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB)));
	}

	/* Read back so the final register state appears in the log */
	linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
		FALSE, FALSE, 0);
	DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
		__FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
		linkctrl_before, linkctrl_after));

	return TRUE;
}
431 
432 static bool
dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t * bus)433 dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
434 {
435 	uint32 rc_aspm_cap;
436 	uint32 ep_aspm_cap;
437 
438 	/* RC ASPM capability */
439 	rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
440 		FALSE, FALSE, 0);
441 	if (rc_aspm_cap == BCME_ERROR) {
442 		DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
443 		return FALSE;
444 	}
445 
446 	/* EP ASPM capability */
447 	ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
448 		FALSE, FALSE, 0);
449 	if (ep_aspm_cap == BCME_ERROR) {
450 		DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
451 		return FALSE;
452 	}
453 
454 	return TRUE;
455 }
456 
/*
 * Enable or disable ASPM on both ends of the link.
 * Ordering matters: enable RC before EP, disable EP before RC.
 * Note: 'ret' is deliberately overwritten by the second call, so the
 * return value reflects only whether the last device touched actually
 * changed state (callers use it to decide whether to re-enable later).
 */
bool
dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
{
	bool ret;

	if (!bus->rc_ep_aspm_cap) {
		DHD_ERROR(("%s: NOT ASPM  CAPABLE rc_ep_aspm_cap: %d\n",
			__FUNCTION__, bus->rc_ep_aspm_cap));
		return FALSE;
	}

	if (enable) {
		/* Enable only L1 ASPM first RC then EP */
		ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
		ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
	} else {
		/* Disable complete ASPM first EP then RC */
		ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
		ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
	}

	return ret;
}
480 
/*
 * Enable or disable the L1 substates bits in the L1SS extended-capability
 * control register of one device (EP or RC).  No-op, apart from a log
 * line, when the register is already in the requested state.
 */
static void
dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
{
	uint32 l1ssctrl_before;
	uint32 l1ssctrl_after = 0;
	uint8 l1ss_ep;
	char *device;

	/* Tag log lines with which end of the link this is */
	device = (dev == bus->dev) ? "EP" : "RC";

	/* Extended Capability Reg */
	l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK);

	if (enable) {
		if (l1ss_ep == PCIE_EXT_L1SS_ENAB) {
			DHD_ERROR(("%s: %s already enabled,  l1ssctrl: 0x%x\n",
				__FUNCTION__, device, l1ssctrl_before));
			return;
		}
		dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
			TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB));
	} else {
		if (l1ss_ep == 0) {
			DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n",
				__FUNCTION__, device, l1ssctrl_before));
			return;
		}
		dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
			TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB)));
	}
	/* Read back so the final register state appears in the log */
	l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
		__FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
		l1ssctrl_before, l1ssctrl_after));

}
520 
521 static bool
dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t * bus)522 dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus)
523 {
524 	uint32 rc_l1ss_cap;
525 	uint32 ep_l1ss_cap;
526 
527 	/* RC Extendend Capacility */
528 	rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS,
529 		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
530 	if (rc_l1ss_cap == BCME_ERROR) {
531 		DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__));
532 		return FALSE;
533 	}
534 
535 	/* EP Extendend Capacility */
536 	ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS,
537 		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
538 	if (ep_l1ss_cap == BCME_ERROR) {
539 		DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__));
540 		return FALSE;
541 	}
542 
543 	return TRUE;
544 }
545 
/*
 * Enable or disable L1SS on both RC and EP.  ASPM is disabled around the
 * L1SS register update and re-enabled afterwards only if this function
 * was the one that changed it (per the return of
 * dhd_bus_aspm_enable_rc_ep()).
 * Ordering: enable RC then EP; disable EP then RC.
 */
void
dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
{
	bool ret;

	if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) {
		DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
			__FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
		return;
	}

	/* Disable ASPM of RC and EP */
	ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE);

	if (enable) {
		/* Enable RC then EP */
		dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
		dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
	} else {
		/* Disable EP then RC */
		dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
		dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
	}

	/* Enable ASPM of RC and EP only if this API disabled */
	if (ret == TRUE) {
		dhd_bus_aspm_enable_rc_ep(bus, TRUE);
	}
}
575 
576 void
dhd_bus_aer_config(dhd_bus_t * bus)577 dhd_bus_aer_config(dhd_bus_t *bus)
578 {
579 	uint32 val;
580 
581 	DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
582 	val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
583 		PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
584 	if (val != (uint32)-1) {
585 		val &= ~CORR_ERR_AE;
586 		dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
587 			PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
588 	} else {
589 		DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
590 			__FUNCTION__, val));
591 	}
592 
593 	DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
594 	val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
595 		PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
596 	if (val != (uint32)-1) {
597 		val &= ~CORR_ERR_AE;
598 		dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
599 			PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
600 	} else {
601 		DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
602 			__FUNCTION__, val));
603 	}
604 }
605 
/*
 * Legacy PCI suspend hook.  Waits (up to 100 polling iterations) for
 * in-flight bus transactions to drain, then performs the bus suspend
 * with the SUSPEND_IN_PROGRESS busy flag held.  Returns -EBUSY if the
 * bus never went idle.
 */
static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
{
	int ret = 0;
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	unsigned long flags;
	uint32 i = 0;

	if (pch) {
		bus = pch->bus;
	}
	/* Nothing to do if the device was never fully probed */
	if (!bus) {
		return ret;
	}

	BCM_REFERENCE(state);

	if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
		DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
			__FUNCTION__, bus->dhd->dhd_bus_busy_state));

		OSL_DELAY(1000);
		/* retry till the transaction is complete */
		while (i < 100) {
			OSL_DELAY(1000);
			i++;
			if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
				DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n",
					__FUNCTION__, i));
				break;
			}
		}
		if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
			DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, "
				"dhd_bus_busy_state = 0x%x\n",
				__FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
			return -EBUSY;
		}
	}
	/* Mark suspend-in-progress so other contexts wait on the bus */
	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	/* Skip the bus suspend while the dongle is held in reset */
	if (!bus->dhd->dongle_reset)
		ret = dhdpcie_set_suspend_resume(bus, TRUE);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return ret;
}
659 
dhdpcie_pci_resume(struct pci_dev * pdev)660 static int dhdpcie_pci_resume(struct pci_dev *pdev)
661 {
662 	int ret = 0;
663 	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
664 	dhd_bus_t *bus = NULL;
665 	unsigned long flags;
666 
667 	if (pch) {
668 		bus = pch->bus;
669 	}
670 	if (!bus) {
671 		return ret;
672 	}
673 
674 	DHD_GENERAL_LOCK(bus->dhd, flags);
675 	DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
676 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
677 
678 	if (!bus->dhd->dongle_reset)
679 		ret = dhdpcie_set_suspend_resume(bus, FALSE);
680 
681 	DHD_GENERAL_LOCK(bus->dhd, flags);
682 	DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
683 	dhd_os_busbusy_wake(bus->dhd);
684 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
685 
686 	return ret;
687 }
688 
/*
 * Common suspend/resume dispatcher: state TRUE = suspend, FALSE = resume.
 * When firmware is not loaded (bus is DOWN) only the PCI bus itself is
 * suspended/resumed; otherwise the full DHD bus suspend path runs.
 */
static int
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
#else
dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
{
	int ret = 0;

	ASSERT(bus && !bus->dhd->dongle_reset);

	/* When firmware is not loaded do the PCI bus */
	/* suspend/resume only */
	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		ret = dhdpcie_pci_suspend_resume(bus, state);
		return ret;
	}
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
		ret = dhdpcie_bus_suspend(bus, state, byint);
#else
		ret = dhdpcie_bus_suspend(bus, state);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	return ret;
}
714 
715 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
/*
 * Runtime-PM suspend callback.  Stops the netif queue, blocks the bus,
 * then attempts the bus suspend; on failure the device is marked busy so
 * runtime PM retries later (-EAGAIN).
 */
static int dhdpcie_pm_runtime_suspend(struct device * dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	int ret = 0;

	if (!pch)
		return -EBUSY;

	bus = pch->bus;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	if (atomic_read(&bus->dhd->block_bus))
		return -EHOSTDOWN;

	dhd_netif_stop_queue(bus);
	atomic_set(&bus->dhd->block_bus, TRUE);

	/* NOTE(review): pdev is passed here, but the definition of
	 * dhdpcie_set_suspend_resume() in this file takes a dhd_bus_t * —
	 * confirm the intended argument under DHD_PCIE_NATIVE_RUNTIMEPM.
	 */
	if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) {
		pm_runtime_mark_last_busy(dev);
		ret = -EAGAIN;
	}

	atomic_set(&bus->dhd->block_bus, FALSE);
	dhd_bus_start_queue(bus);

	return ret;
}
746 
/*
 * Runtime-PM resume callback: undoes dhdpcie_pm_runtime_suspend().
 * NOTE(review): unlike the suspend path, pch is not NULL-checked before
 * dereference here — confirm that is safe in this context.
 */
static int dhdpcie_pm_runtime_resume(struct device * dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = pch->bus;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	if (atomic_read(&bus->dhd->block_bus))
		return -EHOSTDOWN;

	/* NOTE(review): pdev vs dhd_bus_t * mismatch — see the suspend path */
	if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE))
		return -EAGAIN;

	return 0;
}
763 
/*
 * System-sleep suspend, noirq phase: stop the netif queue, block the bus
 * and suspend.  The queue/block state is unwound only if the suspend
 * fails (on success it is undone by the resume_noirq callback).
 */
static int dhdpcie_pm_system_suspend_noirq(struct device * dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	int ret;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	if (!pch)
		return -EBUSY;

	bus = pch->bus;

	if (atomic_read(&bus->dhd->block_bus))
		return -EHOSTDOWN;

	dhd_netif_stop_queue(bus);
	atomic_set(&bus->dhd->block_bus, TRUE);

	/* NOTE(review): pdev vs dhd_bus_t * mismatch — see runtime_suspend */
	ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE);

	if (ret) {
		dhd_bus_start_queue(bus);
		atomic_set(&bus->dhd->block_bus, FALSE);
	}

	return ret;
}
793 
/*
 * System-sleep resume, noirq phase: resume the bus, unblock it and
 * restart the netif queue, then mark the device busy for runtime PM.
 */
static int dhdpcie_pm_system_resume_noirq(struct device * dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	int ret;

	if (!pch)
		return -EBUSY;

	bus = pch->bus;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	/* NOTE(review): pdev vs dhd_bus_t * mismatch — see runtime_suspend */
	ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE);

	atomic_set(&bus->dhd->block_bus, FALSE);
	dhd_bus_start_queue(bus);
	pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));

	return ret;
}
816 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
817 
818 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
819 extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
820 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
821 
822 static void
dhdpcie_suspend_dump_cfgregs(struct dhd_bus * bus,char * suspend_state)823 dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state)
824 {
825 	DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
826 		"BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
827 		suspend_state,
828 		PCIECFGREG_BASEADDR0,
829 		dhd_pcie_config_read(bus->osh,
830 			PCIECFGREG_BASEADDR0, sizeof(uint32)),
831 		PCIECFGREG_BASEADDR1,
832 		dhd_pcie_config_read(bus->osh,
833 			PCIECFGREG_BASEADDR1, sizeof(uint32)),
834 		PCIE_CFG_PMCSR,
835 		dhd_pcie_config_read(bus->osh,
836 			PCIE_CFG_PMCSR, sizeof(uint32))));
837 }
838 
/*
 * Put the endpoint into D3hot: save config space (stashed in pch->state
 * for dhdpcie_resume_dev()), arm wake, disable the device and drop the
 * power state.  Refuses to run while the PCIe link is down.
 */
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
	int ret;
	dhdpcie_info_t *pch = pci_get_drvdata(dev);
	dhd_bus_t *bus = pch->bus;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	DHD_ERROR(("%s: Enter\n", __FUNCTION__));
	dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* Make sure the DPC tasklet cannot run while the device sleeps */
	dhd_dpc_tasklet_kill(bus->dhd);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	pci_save_state(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* Detach the saved config space from the pci core; re-applied on resume */
	pch->state = pci_store_saved_state(dev);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	pci_enable_wake(dev, PCI_D0, TRUE);
	if (pci_is_enabled(dev))
		pci_disable_device(dev);

	ret = pci_set_power_state(dev, PCI_D3hot);
	if (ret) {
		DHD_ERROR(("%s: pci_set_power_state error %d\n",
			__FUNCTION__, ret));
	}
//	dev->state_saved = FALSE;
	dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
	return ret;
}
873 
874 #ifdef DHD_WAKE_STATUS
bcmpcie_get_total_wake(struct dhd_bus * bus)875 int bcmpcie_get_total_wake(struct dhd_bus *bus)
876 {
877 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
878 
879 	return pch->total_wake_count;
880 }
881 
bcmpcie_set_get_wake(struct dhd_bus * bus,int flag)882 int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
883 {
884 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
885 	unsigned long flags;
886 	int ret;
887 
888 	spin_lock_irqsave(&pch->pcie_lock, flags);
889 
890 	ret = pch->pkt_wake;
891 	pch->total_wake_count += flag;
892 	pch->pkt_wake = flag;
893 
894 	spin_unlock_irqrestore(&pch->pcie_lock, flags);
895 	return ret;
896 }
897 #endif /* DHD_WAKE_STATUS */
898 
/*
 * Bring the endpoint back from D3hot: re-apply the config space saved in
 * dhdpcie_suspend_dev(), re-enable the device and restore bus mastering.
 */
static int dhdpcie_resume_dev(struct pci_dev *dev)
{
	int err = 0;
	dhdpcie_info_t *pch = pci_get_drvdata(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* Hand the state saved at suspend back to the pci core */
	pci_load_and_free_saved_state(dev, &pch->state);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	DHD_ERROR(("%s: Enter\n", __FUNCTION__));
//	dev->state_saved = TRUE;
	pci_restore_state(dev);
#ifdef FORCE_TPOWERON
	/* Some chips need tPowerOn re-forced after the link comes back */
	if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
		dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
	}
#endif /* FORCE_TPOWERON */
	err = pci_enable_device(dev);
	if (err) {
		printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
		goto out;
	}
	pci_set_master(dev);
	/* NOTE(review): D0 is requested after pci_enable_device(); the common
	 * ordering is pci_set_power_state(D0) first — confirm intentional.
	 */
	err = pci_set_power_state(dev, PCI_D0);
	if (err) {
		printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
		goto out;
	}
	BCM_REFERENCE(pch);
	dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
out:
	return err;
}
930 
/*
 * Resume the host-side PCIe root complex / reference clock (platform
 * specific; compiled per-SoC).  On failure the link is marked down so
 * subsequent register accesses are skipped.
 */
static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
{
	int bcmerror = 0;
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
	bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM);
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_MSM
	bcmerror = dhdpcie_start_host_pcieclock(bus);
#endif /* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_TEGRA
	bcmerror = tegra_pcie_pm_resume();
#endif /* CONFIG_ARCH_TEGRA */
	if (bcmerror < 0) {
		DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
			__FUNCTION__, bcmerror));
		bus->is_linkdown = 1;
	}

	return bcmerror;
}
951 
/*
 * Suspend the host-side PCIe root complex / reference clock (platform
 * specific; compiled per-SoC).  On Exynos the RC config space is saved
 * first when the RC device handle is available.
 */
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
{
	int bcmerror = 0;
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
	if (bus->rc_dev) {
		pci_save_state(bus->rc_dev);
	} else {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
	}
	exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
#endif	/* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_MSM
	bcmerror = dhdpcie_stop_host_pcieclock(bus);
#endif	/* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_TEGRA
	bcmerror = tegra_pcie_pm_suspend();
#endif /* CONFIG_ARCH_TEGRA */
	return bcmerror;
}
972 
973 /**
974  * dhdpcie_os_setbar1win
975  *
976  * Interface function for setting bar1 window in order to allow
977  * os layer to be aware of current window positon.
978  *
979  * @bus: dhd bus context
980  * @addr: new backplane windows address for BAR1
981  */
982 void
dhdpcie_os_setbar1win(dhd_bus_t * bus,uint32 addr)983 dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr)
984 {
985 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
986 
987 	osl_pci_write_config(bus->osh, PCI_BAR1_WIN, 4, addr);
988 	pch->curr_bar1_win = addr;
989 }
990 
991 /**
992  * dhdpcie_os_chkbpoffset
993  *
994  * Check the provided address is within the current BAR1 window,
995  * if not, shift the window
996  *
 * @pch: dhd pcie info context (holds BAR1 window state)
998  * @offset: back plane address that the caller wants to access
999  *
1000  * Return: new offset for access
1001  */
1002 static ulong
dhdpcie_os_chkbpoffset(dhdpcie_info_t * pch,ulong offset)1003 dhdpcie_os_chkbpoffset(dhdpcie_info_t *pch, ulong offset)
1004 {
1005 	/* Determine BAR1 backplane window using window size
1006 	 * Window address mask should be ~(size - 1)
1007 	 */
1008 	uint32 bpwin = (uint32)(offset & ~(pch->bar1_size - 1));
1009 
1010 	if (bpwin != pch->curr_bar1_win) {
1011 		/* Move BAR1 window */
1012 		dhdpcie_os_setbar1win(pch->bus, bpwin);
1013 	}
1014 
1015 	return offset - bpwin;
1016 }
1017 
1018 /**
1019  * dhdpcie os layer tcm read/write interface
1020  */
1021 void
dhdpcie_os_wtcm8(dhd_bus_t * bus,ulong offset,uint8 data)1022 dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
1023 {
1024 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1025 
1026 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1027 	W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
1028 }
1029 
1030 uint8
dhdpcie_os_rtcm8(dhd_bus_t * bus,ulong offset)1031 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset)
1032 {
1033 	volatile uint8 data;
1034 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1035 
1036 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1037 	data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
1038 	return data;
1039 }
1040 
1041 void
dhdpcie_os_wtcm16(dhd_bus_t * bus,ulong offset,uint16 data)1042 dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
1043 {
1044 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1045 
1046 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1047 	W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
1048 }
1049 
1050 uint16
dhdpcie_os_rtcm16(dhd_bus_t * bus,ulong offset)1051 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset)
1052 {
1053 	volatile uint16 data;
1054 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1055 
1056 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1057 	data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
1058 	return data;
1059 }
1060 
1061 void
dhdpcie_os_wtcm32(dhd_bus_t * bus,ulong offset,uint32 data)1062 dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
1063 {
1064 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1065 
1066 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1067 	W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
1068 }
1069 
1070 uint32
dhdpcie_os_rtcm32(dhd_bus_t * bus,ulong offset)1071 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset)
1072 {
1073 	volatile uint32 data;
1074 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1075 
1076 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1077 	data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
1078 	return data;
1079 }
1080 
1081 #ifdef DHD_SUPPORT_64BIT
1082 void
dhdpcie_os_wtcm64(dhd_bus_t * bus,ulong offset,uint64 data)1083 dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
1084 {
1085 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1086 
1087 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1088 	W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
1089 }
1090 
1091 uint64
dhdpcie_os_rtcm64(dhd_bus_t * bus,ulong offset)1092 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset)
1093 {
1094 	volatile uint64 data;
1095 	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1096 
1097 	offset = dhdpcie_os_chkbpoffset(pch, offset);
1098 	data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
1099 	return data;
1100 }
1101 #endif /* DHD_SUPPORT_64BIT */
1102 
1103 uint32
dhdpcie_rc_config_read(dhd_bus_t * bus,uint offset)1104 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
1105 {
1106 	uint val = -1; /* Initialise to 0xfffffff */
1107 	if (bus->rc_dev) {
1108 		pci_read_config_dword(bus->rc_dev, offset, &val);
1109 		OSL_DELAY(100);
1110 	} else {
1111 		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
1112 			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
1113 	}
1114 	DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
1115 		__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
1116 	return (val);
1117 }
1118 
1119 /*
1120  * Reads/ Writes the value of capability register
1121  * from the given CAP_ID section of PCI Root Port
1122  *
 * Arguments
 * @pdev PCI device handle (RC or EP) whose capability space is accessed
1125  * @cap Capability or Extended Capability ID to get
1126  * @offset offset of Register to Read
1127  * @is_ext TRUE if @cap is given for Extended Capability
1128  * @is_write is set to TRUE to indicate write
1129  * @val value to write
1130  *
1131  * Return Value
1132  * Returns 0xffffffff on error
1133  * on write success returns BCME_OK (0)
1134  * on Read Success returns the value of register requested
 * Note: caller should ensure valid capability ID and Ext. Capability ID.
1136  */
1137 
1138 uint32
dhdpcie_access_cap(struct pci_dev * pdev,int cap,uint offset,bool is_ext,bool is_write,uint32 writeval)1139 dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
1140 	uint32 writeval)
1141 {
1142 	int cap_ptr = 0;
1143 	uint32 ret = -1;
1144 	uint32 readval;
1145 
1146 	if (!(pdev)) {
1147 		DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
1148 		return ret;
1149 	}
1150 
1151 	/* Find Capability offset */
1152 	if (is_ext) {
1153 		/* removing max EXT_CAP_ID check as
1154 		 * linux kernel definition's max value is not upadted yet as per spec
1155 		 */
1156 		cap_ptr = pci_find_ext_capability(pdev, cap);
1157 
1158 	} else {
1159 		/* removing max PCI_CAP_ID_MAX check as
1160 		 * pervious kernel versions dont have this definition
1161 		 */
1162 		cap_ptr = pci_find_capability(pdev, cap);
1163 	}
1164 
1165 	/* Return if capability with given ID not found */
1166 	if (cap_ptr == 0) {
1167 		DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
1168 			__FUNCTION__, cap));
1169 		return BCME_ERROR;
1170 	}
1171 
1172 	if (is_write) {
1173 		pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
1174 		ret = BCME_OK;
1175 
1176 	} else {
1177 
1178 		pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
1179 		ret = readval;
1180 	}
1181 
1182 	return ret;
1183 }
1184 
1185 uint32
dhdpcie_rc_access_cap(dhd_bus_t * bus,int cap,uint offset,bool is_ext,bool is_write,uint32 writeval)1186 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
1187 	uint32 writeval)
1188 {
1189 	if (!(bus->rc_dev)) {
1190 		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
1191 			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
1192 		return BCME_ERROR;
1193 	}
1194 
1195 	return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
1196 }
1197 
1198 uint32
dhdpcie_ep_access_cap(dhd_bus_t * bus,int cap,uint offset,bool is_ext,bool is_write,uint32 writeval)1199 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
1200 	uint32 writeval)
1201 {
1202 	if (!(bus->dev)) {
1203 		DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
1204 		return BCME_ERROR;
1205 	}
1206 
1207 	return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
1208 }
1209 
1210 /* API wrapper to read Root Port link capability
1211  * Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found
1212  */
1213 
dhd_debug_get_rc_linkcap(dhd_bus_t * bus)1214 uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
1215 {
1216 	uint32 linkcap = -1;
1217 	linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
1218 			PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
1219 	linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
1220 	return linkcap;
1221 }
1222 
/* Save (state=TRUE, on suspend) or restore (state=FALSE, on resume) the
 * BAR coherent-access enable bits of PCIE_CFG_SUBSYSTEM_CONTROL.
 * Only applies when the dongle CPU core is an ARM CA7.
 */
static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
{
	uint32 cfgval;

	if (bus->coreid != ARMCA7_CORE_ID) {
		return;
	}

	if (state) {
		/* Sleep: capture the coherent-access enable bits */
		cfgval = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
		bus->coherent_state = cfgval & PCIE_BARCOHERENTACCEN_MASK;
	} else {
		/* Wake: merge the saved bits back into the register */
		cfgval = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
		cfgval = (cfgval & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
		dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, cfgval);
	}
}
1237 
/*
 * Suspend (state=TRUE) or resume (state=FALSE) the EP and the host RC.
 * Ordering matters: on suspend the EP goes down before the RC; on resume
 * the RC comes up before the EP.
 *
 * @bus:   dhd bus context
 * @state: TRUE = suspend, FALSE = resume
 * Return: 0 on success, error code from the device suspend/resume otherwise.
 */
int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
	int rc;

	struct pci_dev *dev = bus->dev;

	if (state) {
		/* Save coherent-access config bits before sleep */
		dhdpcie_config_save_restore_coherent(bus, state);
#if !defined(BCMPCIE_OOB_HOST_WAKE)
		/* In-band wake: arm PME before suspending the EP */
		dhdpcie_pme_active(bus->osh, state);
#endif // endif
		rc = dhdpcie_suspend_dev(dev);
		if (!rc) {
			/* EP down; now power down the host RC side */
			dhdpcie_suspend_host_dev(bus);
		}
	} else {
		/* Bring the RC back first, then the EP */
		rc = dhdpcie_resume_host_dev(bus);
		if (!rc) {
			rc = dhdpcie_resume_dev(dev);
			if (PCIECTO_ENAB(bus)) {
				/* reinit CTO configuration
				 * because cfg space got reset at D3 (PERST)
				 */
				dhdpcie_cto_cfg_init(bus, TRUE);
			}
			if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
				dhdpcie_ssreset_dis_enum_rst(bus);
			}
#if !defined(BCMPCIE_OOB_HOST_WAKE)
			/* In-band wake: disarm PME after the EP is back */
			dhdpcie_pme_active(bus->osh, state);
#endif // endif
		}
		/* Restore coherent-access config bits after wake */
		dhdpcie_config_save_restore_coherent(bus, state);
		if (bus->is_linkdown) {
			/* RC resume failed earlier: report a hang upstream */
			bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
			dhd_os_send_hang_message(bus->dhd);
		}
	}
	return rc;
}
1278 
/*
 * bus_for_each_dev() callback: count Broadcom PCI devices on the bus.
 *
 * @dev:  generic device being visited
 * @data: pointer to an int counter, incremented per Broadcom device
 * Return: always 0 so iteration continues over all devices.
 */
static int dhdpcie_device_scan(struct device *dev, void *data)
{
	struct pci_dev *pcidev;
	int *cnt = data;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	/* Recover the pci_dev from its embedded struct device */
	pcidev = container_of(dev, struct pci_dev, dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	/* 0x14e4 is the Broadcom PCI vendor ID; skip everything else */
	if (pcidev->vendor != 0x14e4)
		return 0;

	DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
	*cnt += 1;
	/* Warn if a Broadcom device is already claimed by another driver */
	if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
		DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n",
			pcidev->device, pcidev->driver->name));

	return 0;
}
1303 
/*
 * Register the dhd PCIe driver with the Linux PCI core and verify that at
 * least one Broadcom device was enumerated and initialized.
 *
 * Return: 0 on success, BCME_ERROR or pci_register_driver() error otherwise.
 */
int
dhdpcie_bus_register(void)
{
	int error = 0;

	if (!(error = pci_register_driver(&dhdpcie_driver))) {
		/* NOTE: `error` is reused here as a device COUNTER filled in
		 * by dhdpcie_device_scan(); zero means no Broadcom device.
		 */
		bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
		if (!error) {
			DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
		} else if (!dhdpcie_init_succeeded) {
			/* Device present, but probe/init failed */
			DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
		} else {
			return 0;
		}

		/* Roll back the registration on any failure path */
		pci_unregister_driver(&dhdpcie_driver);
		error = BCME_ERROR;
	}

	return error;
}
1325 
/* Unregister the dhd PCIe driver from the Linux PCI core. */
void
dhdpcie_bus_unregister(void)
{
	pci_unregister_driver(&dhdpcie_driver);
}
1331 
/*
 * PCI probe callback: validate the chip, run full bus/driver initialization,
 * and apply runtime-PM adjustments. Serialized via DHD_MUTEX with other
 * probe/remove paths. Only a single BRCM device is supported at a time.
 *
 * @pdev: PCI device being probed
 * @ent:  matching entry from the driver's id table (unused here)
 * Return: 0 on success, -ENODEV on any probe failure.
 */
int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	DHD_MUTEX_LOCK();

	if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
		DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
		err = -ENODEV;
		goto exit;
	}

	printf("PCI_PROBE:  bus %X, slot %X,vendor %X, device %X"
		"(good PCI location)\n", pdev->bus->number,
		PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);

	/* Single-instance driver: refuse a second BRCM device */
	if (dhdpcie_init_succeeded == TRUE) {
		DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n",
			__FUNCTION__));
		err = -ENODEV;
		goto exit;
	}

	/* Full enumeration + bus attach; cleans up after itself on failure */
	if (dhdpcie_init (pdev)) {
		DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
		err = -ENODEV;
		goto exit;
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	/*
	Since MSM PCIe RC dev usage conunt already incremented +2 even
	before dhdpcie_pci_probe() called, then we inevitably to call
	pm_runtime_put_noidle() two times to make the count start with zero.
	*/

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
	/* disable async suspend */
	device_disable_async_suspend(&pdev->dev);
#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */

	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
exit:
	DHD_MUTEX_UNLOCK();
	return err;
}
1383 
1384 int
dhdpcie_detach(dhdpcie_info_t * pch)1385 dhdpcie_detach(dhdpcie_info_t *pch)
1386 {
1387 	if (pch) {
1388 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1389 		if (!dhd_download_fw_on_driverload) {
1390 			pci_load_and_free_saved_state(pch->dev, &pch->default_state);
1391 		}
1392 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1393 		MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
1394 	}
1395 	return 0;
1396 }
1397 
/*
 * PCI remove callback: tear down in reverse order of dhdpcie_init() —
 * bus release, PCI disable, per-device contexts, then the osl handle.
 * Serialized via DHD_MUTEX against probe.
 *
 * NOTE(review): pch from pci_get_drvdata() is dereferenced without a NULL
 * check; this presumably relies on probe having always set drvdata — confirm.
 */
void __devexit
dhdpcie_pci_remove(struct pci_dev *pdev)
{
	osl_t *osh = NULL;
	dhdpcie_info_t *pch = NULL;
	dhd_bus_t *bus = NULL;

	DHD_TRACE(("%s Enter\n", __FUNCTION__));

	DHD_MUTEX_LOCK();

	pch = pci_get_drvdata(pdev);
	bus = pch->bus;
	osh = pch->osh;

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	/* Counterpart of the two put_noidle() calls done at probe time */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	if (bus) {

		/* Drop the RC reference before releasing the bus */
		bus->rc_dev = NULL;

		dhdpcie_bus_release(bus);
	}

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
#ifdef BCMPCIE_OOB_HOST_WAKE
	/* pcie os info detach */
	MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef USE_SMMU_ARCH_MSM
	/* smmu info detach */
	dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
	MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
#endif /* USE_SMMU_ARCH_MSM */
	/* pcie info detach */
	dhdpcie_detach(pch);
	/* osl detach */
	osl_detach(osh);

#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
	defined(CONFIG_ARCH_APQ8084)
	/* Clear the platform OOB wake hooks installed at init */
	brcm_pcie_wake.wake_irq = NULL;
	brcm_pcie_wake.data = NULL;
#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */

	/* Allow a subsequent probe to attach again */
	dhdpcie_init_succeeded = FALSE;

	DHD_MUTEX_UNLOCK();

	DHD_TRACE(("%s Exit\n", __FUNCTION__));

	return;
}
1455 
1456 /* Enable Linux Msi */
/* Enable Linux Msi
 *
 * Kernel-version dispatch for MSI allocation:
 *  >= 4.8:  pci_alloc_irq_vectors() (modern API)
 *  >= 3.16: pci_enable_msi_range()
 *  older:   pci_enable_msi_block() (min_vecs unused by that API)
 * Return: number of vectors allocated, or negative errno on failure.
 */
int
dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
	return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
	return pci_enable_msi_range(pdev, min_vecs, max_vecs);
#else
	return pci_enable_msi_block(pdev, max_vecs);
#endif // endif
}
1468 
1469 /* Disable Linux Msi */
1470 void
dhdpcie_disable_msi(struct pci_dev * pdev)1471 dhdpcie_disable_msi(struct pci_dev *pdev)
1472 {
1473 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1474 	pci_free_irq_vectors(pdev);
1475 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
1476 	pci_disable_msi(pdev);
1477 #else
1478 	pci_disable_msi(pdev);
1479 #endif // endif
1480 	return;
1481 }
1482 
1483 /* Request Linux irq */
/* Request Linux irq
 *
 * Register the dongle-to-host interrupt handler. Tries MSI first when the
 * bus is configured for it, falling back to legacy INTx if MSI allocation
 * fails. Re-enables the IRQ if it was left disabled from a prior state.
 *
 * @dhdpcie_info: per-device pcie context (provides bus and pci_dev)
 * Return: 0 on success, -1 if request_irq() fails.
 */
int
dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
{
	dhd_bus_t *bus = dhdpcie_info->bus;
	struct pci_dev *pdev = dhdpcie_info->bus->dev;
	int host_irq_disabled;

	if (!bus->irq_registered) {
		snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
			"dhdpcie:%s", pci_name(pdev));

		if (bus->d2h_intr_method == PCIE_MSI) {
			/* Single MSI vector; on failure fall back to INTx */
			if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
				DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
				dhdpcie_disable_msi(pdev);
				bus->d2h_intr_method = PCIE_INTX;
			}
		}

		if (bus->d2h_intr_method == PCIE_MSI)
			printf("%s: MSI enabled\n", __FUNCTION__);
		else
			printf("%s: INTx enabled\n", __FUNCTION__);

		if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
			dhdpcie_info->pciname, bus) < 0) {
			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
			/* Undo MSI allocation if the handler could not attach */
			if (bus->d2h_intr_method == PCIE_MSI) {
				dhdpcie_disable_msi(pdev);
			}
			return -1;
		}
		else {
			bus->irq_registered = TRUE;
		}
	} else {
		DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
	}

	/* A previous suspend/teardown may have left the IRQ disabled */
	host_irq_disabled = dhdpcie_irq_disabled(bus);
	if (host_irq_disabled) {
		DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so, enabled it again\n",
			__FUNCTION__, host_irq_disabled));
		dhdpcie_enable_irq(bus);
	}

	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));

	return 0; /* SUCCESS */
}
1534 
1535 /**
1536  *	dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
1537  */
1538 int
dhdpcie_get_pcieirq(struct dhd_bus * bus,unsigned int * irq)1539 dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
1540 {
1541 	struct pci_dev *pdev = bus->dev;
1542 
1543 	if (!pdev) {
1544 		DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
1545 		return -ENODEV;
1546 	}
1547 
1548 	*irq  = pdev->irq;
1549 
1550 	return 0; /* SUCCESS */
1551 }
1552 
1553 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1554 #define PRINTF_RESOURCE	"0x%016llx"
1555 #else
1556 #define PRINTF_RESOURCE	"0x%08x"
1557 #endif // endif
1558 
1559 #ifdef EXYNOS_PCIE_MODULE_PATCH
1560 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1561 extern struct pci_saved_state *bcm_pcie_default_state;
1562 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1563 #endif /* EXYNOS_MODULE_PATCH */
1564 
1565 /*
1566 
1567 Name:  osl_pci_get_resource
1568 
Parameters:
1570 
1571 1: struct pci_dev *pdev   -- pci device structure
1572 2: pci_res                       -- structure containing pci configuration space values
1573 
1574 Return value:
1575 
1576 int   - Status (TRUE or FALSE)
1577 
1578 Description:
1579 Access PCI configuration space, retrieve  PCI allocated resources , updates in resource structure.
1580 
1581  */
dhdpcie_get_resource(dhdpcie_info_t * dhdpcie_info)1582 int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
1583 {
1584 	phys_addr_t  bar0_addr, bar1_addr;
1585 	ulong bar1_size;
1586 	struct pci_dev *pdev = NULL;
1587 	pdev = dhdpcie_info->dev;
1588 #ifdef EXYNOS_PCIE_MODULE_PATCH
1589 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1590 	if (bcm_pcie_default_state) {
1591 		pci_load_saved_state(pdev, bcm_pcie_default_state);
1592 		pci_restore_state(pdev);
1593 	}
1594 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1595 #endif /* EXYNOS_MODULE_PATCH */
1596 	do {
1597 		if (pci_enable_device(pdev)) {
1598 			printf("%s: Cannot enable PCI device\n", __FUNCTION__);
1599 			break;
1600 		}
1601 		pci_set_master(pdev);
1602 		bar0_addr = pci_resource_start(pdev, 0);	/* Bar-0 mapped address */
1603 		bar1_addr = pci_resource_start(pdev, 2);	/* Bar-1 mapped address */
1604 
1605 		/* read Bar-1 mapped memory range */
1606 		bar1_size = pci_resource_len(pdev, 2);
1607 
1608 		if ((bar1_size == 0) || (bar1_addr == 0)) {
1609 			printf("%s: BAR1 Not enabled for this device  size(%ld),"
1610 				" addr(0x"PRINTF_RESOURCE")\n",
1611 				__FUNCTION__, bar1_size, bar1_addr);
1612 			goto err;
1613 		}
1614 
1615 		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
1616 		dhdpcie_info->bar1_size =
1617 			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
1618 		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
1619 
1620 		if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
1621 			DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
1622 			break;
1623 		}
1624 #ifdef EXYNOS_PCIE_MODULE_PATCH
1625 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1626 		if (bcm_pcie_default_state == NULL) {
1627 			pci_save_state(pdev);
1628 			bcm_pcie_default_state = pci_store_saved_state(pdev);
1629 		}
1630 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1631 #endif /* EXYNOS_MODULE_PATCH */
1632 
1633 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1634 	/* Backup PCIe configuration so as to use Wi-Fi on/off process
1635 	 * in case of built in driver
1636 	 */
1637 	pci_save_state(pdev);
1638 	dhdpcie_info->default_state = pci_store_saved_state(pdev);
1639 
1640 	if (dhdpcie_info->default_state == NULL) {
1641 		DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
1642 			__FUNCTION__));
1643 		REG_UNMAP(dhdpcie_info->regs);
1644 		REG_UNMAP(dhdpcie_info->tcm);
1645 		pci_disable_device(pdev);
1646 		break;
1647 	}
1648 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1649 
1650 		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
1651 			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
1652 		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
1653 			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
1654 
1655 		return 0; /* SUCCESS  */
1656 	} while (0);
1657 err:
1658 	return -1;  /* FAILURE */
1659 }
1660 
dhdpcie_scan_resource(dhdpcie_info_t * dhdpcie_info)1661 int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
1662 {
1663 
1664 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1665 
1666 	do {
1667 		/* define it here only!! */
1668 		if (dhdpcie_get_resource (dhdpcie_info)) {
1669 			DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
1670 			break;
1671 		}
1672 		DHD_TRACE(("%s:Exit - SUCCESS \n",
1673 			__FUNCTION__));
1674 
1675 		return 0; /* SUCCESS */
1676 
1677 	} while (0);
1678 
1679 	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1680 
1681 	return -1; /* FAILURE */
1682 
1683 }
1684 
dhdpcie_dump_resource(dhd_bus_t * bus)1685 void dhdpcie_dump_resource(dhd_bus_t *bus)
1686 {
1687 	dhdpcie_info_t *pch;
1688 
1689 	if (bus == NULL) {
1690 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
1691 		return;
1692 	}
1693 
1694 	if (bus->dev == NULL) {
1695 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
1696 		return;
1697 	}
1698 
1699 	pch = pci_get_drvdata(bus->dev);
1700 	if (pch == NULL) {
1701 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
1702 		return;
1703 	}
1704 
1705 	/* BAR0 */
1706 	DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
1707 		__FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
1708 		DONGLE_REG_MAP_SIZE));
1709 
1710 	/* BAR1 */
1711 	DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
1712 		__FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
1713 		pch->bar1_size));
1714 }
1715 
dhdpcie_init(struct pci_dev * pdev)1716 int dhdpcie_init(struct pci_dev *pdev)
1717 {
1718 
1719 	osl_t 				*osh = NULL;
1720 	dhd_bus_t 			*bus = NULL;
1721 	dhdpcie_info_t		*dhdpcie_info =  NULL;
1722 	wifi_adapter_info_t	*adapter = NULL;
1723 #ifdef BCMPCIE_OOB_HOST_WAKE
1724 	dhdpcie_os_info_t	*dhdpcie_osinfo = NULL;
1725 #endif /* BCMPCIE_OOB_HOST_WAKE */
1726 #ifdef USE_SMMU_ARCH_MSM
1727 	dhdpcie_smmu_info_t	*dhdpcie_smmu_info = NULL;
1728 #endif /* USE_SMMU_ARCH_MSM */
1729 	int ret = 0;
1730 
1731 	do {
1732 		/* osl attach */
1733 		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
1734 			DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
1735 			break;
1736 		}
1737 
1738 		/* initialize static buffer */
1739 		adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
1740 			PCI_SLOT(pdev->devfn));
1741 		if (adapter != NULL) {
1742 			DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
1743 			adapter->bus_type = PCI_BUS;
1744 			adapter->bus_num = pdev->bus->number;
1745 			adapter->slot_num = PCI_SLOT(pdev->devfn);
1746 			adapter->pci_dev = pdev;
1747 		} else
1748 			DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
1749 		osl_static_mem_init(osh, adapter);
1750 
1751 		/* Set ACP coherence flag */
1752 		if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT())
1753 			osl_flag_set(osh, OSL_ACP_COHERENCE);
1754 
1755 		/*  allocate linux spcific pcie structure here */
1756 		if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
1757 			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
1758 			break;
1759 		}
1760 		bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
1761 		dhdpcie_info->osh = osh;
1762 		dhdpcie_info->dev = pdev;
1763 
1764 #ifdef BCMPCIE_OOB_HOST_WAKE
1765 		/* allocate OS speicific structure */
1766 		dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
1767 		if (dhdpcie_osinfo == NULL) {
1768 			DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
1769 				__FUNCTION__));
1770 			break;
1771 		}
1772 		bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1773 		dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
1774 
1775 		/* Initialize host wake IRQ */
1776 		spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
1777 		/* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */
1778 		dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
1779 			&dhdpcie_osinfo->oob_irq_flags);
1780 		if (dhdpcie_osinfo->oob_irq_num < 0) {
1781 			DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
1782 		}
1783 #endif /* BCMPCIE_OOB_HOST_WAKE */
1784 
1785 #ifdef USE_SMMU_ARCH_MSM
1786 		/* allocate private structure for using SMMU */
1787 		dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
1788 		if (dhdpcie_smmu_info == NULL) {
1789 			DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
1790 				__FUNCTION__));
1791 			break;
1792 		}
1793 		bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1794 		dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
1795 
1796 		/* Initialize smmu structure */
1797 		if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
1798 			DHD_ERROR(("%s: Failed to initialize SMMU\n",
1799 				__FUNCTION__));
1800 			break;
1801 		}
1802 #endif /* USE_SMMU_ARCH_MSM */
1803 
1804 #ifdef DHD_WAKE_STATUS
1805 		/* Initialize pcie_lock */
1806 		spin_lock_init(&dhdpcie_info->pcie_lock);
1807 #endif /* DHD_WAKE_STATUS */
1808 
1809 		/* Find the PCI resources, verify the  */
1810 		/* vendor and device ID, map BAR regions and irq,  update in structures */
1811 		if (dhdpcie_scan_resource(dhdpcie_info)) {
1812 			DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__));
1813 
1814 			break;
1815 		}
1816 
1817 		/* Bus initialization */
1818 		ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev, adapter);
1819 		if (ret != BCME_OK) {
1820 			DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
1821 			break;
1822 		}
1823 
1824 		dhdpcie_info->bus = bus;
1825 		bus->is_linkdown = 0;
1826 		bus->no_bus_init = FALSE;
1827 		bus->cto_triggered = 0;
1828 
1829 		bus->rc_dev = NULL;
1830 
1831 		/* Get RC Device Handle */
1832 		if (bus->dev->bus) {
1833 			/* self member of structure pci_bus is bridge device as seen by parent */
1834 			bus->rc_dev = bus->dev->bus->self;
1835 			if (bus->rc_dev)
1836 				DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__,
1837 					bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev));
1838 			else
1839 				DHD_ERROR(("%s: bus->dev->bus->self is NULL\n", __FUNCTION__));
1840 		} else {
1841 			DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__));
1842 		}
1843 
1844 		/* if rc_dev is still NULL, try to get from vendor/device IDs */
1845 		if (bus->rc_dev == NULL) {
1846 			bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
1847 			DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__,
1848 				PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev));
1849 		}
1850 
1851 		bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus);
1852 		bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus);
1853 		DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
1854 			__FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
1855 
1856 #ifdef FORCE_TPOWERON
1857 		if (dhdpcie_chip_req_forced_tpoweron(bus)) {
1858 			dhd_bus_set_tpoweron(bus, tpoweron_scale);
1859 		}
1860 #endif /* FORCE_TPOWERON */
1861 
1862 #if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
1863 	defined(CONFIG_ARCH_APQ8084)
1864 		brcm_pcie_wake.wake_irq = wlan_oob_irq;
1865 		brcm_pcie_wake.data = bus;
1866 #endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */
1867 
1868 #ifdef DONGLE_ENABLE_ISOLATION
1869 		bus->dhd->dongle_isolation = TRUE;
1870 #endif /* DONGLE_ENABLE_ISOLATION */
1871 
1872 		if (bus->intr) {
1873 			/* Register interrupt callback, but mask it (not operational yet). */
1874 			DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
1875 			dhdpcie_bus_intr_disable(bus);
1876 
1877 			if (dhdpcie_request_irq(dhdpcie_info)) {
1878 				DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
1879 				break;
1880 			}
1881 		} else {
1882 			bus->pollrate = 1;
1883 			DHD_INFO(("%s: PCIe interrupt function is NOT registered "
1884 				"due to polling mode\n", __FUNCTION__));
1885 		}
1886 
1887 #if defined(BCM_REQUEST_FW)
1888 		if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
1889 		DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
1890 		}
1891 		bus->nv_path = NULL;
1892 		bus->fw_path = NULL;
1893 #endif /* BCM_REQUEST_FW */
1894 
1895 		/* set private data for pci_dev */
1896 		pci_set_drvdata(pdev, dhdpcie_info);
1897 
1898 #if defined(BCMDHD_MODULAR) && defined(INSMOD_FW_LOAD)
1899 		if (1)
1900 #else
1901 		if (dhd_download_fw_on_driverload)
1902 #endif
1903 		{
1904 			if (dhd_bus_start(bus->dhd)) {
1905 				DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__));
1906 				if (!allow_delay_fwdl)
1907 					break;
1908 			}
1909 		} else {
1910 			/* Set ramdom MAC address during boot time */
1911 			get_random_bytes(&bus->dhd->mac.octet[3], 3);
1912 			/* Adding BRCM OUI */
1913 			bus->dhd->mac.octet[0] = 0;
1914 			bus->dhd->mac.octet[1] = 0x90;
1915 			bus->dhd->mac.octet[2] = 0x4C;
1916 		}
1917 
1918 		/* Attach to the OS network interface */
1919 		DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
1920 		if (dhd_attach_net(bus->dhd, TRUE)) {
1921 			DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
1922 			break;
1923 		}
1924 
1925 		dhdpcie_init_succeeded = TRUE;
1926 
1927 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1928 		pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
1929 		pm_runtime_use_autosuspend(&pdev->dev);
1930 		atomic_set(&bus->dhd->block_bus, FALSE);
1931 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1932 
1933 #if defined(MULTIPLE_SUPPLICANT)
1934 		wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
1935 #endif /* MULTIPLE_SUPPLICANT */
1936 
1937 		DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
1938 		return 0;  /* return  SUCCESS  */
1939 
1940 	} while (0);
1941 	/* reverse the initialization in order in case of error */
1942 
1943 	if (bus)
1944 		dhdpcie_bus_release(bus);
1945 
1946 #ifdef BCMPCIE_OOB_HOST_WAKE
1947 	if (dhdpcie_osinfo) {
1948 		MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1949 	}
1950 #endif /* BCMPCIE_OOB_HOST_WAKE */
1951 
1952 #ifdef USE_SMMU_ARCH_MSM
1953 	if (dhdpcie_smmu_info) {
1954 		MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1955 		dhdpcie_info->smmu_cxt = NULL;
1956 	}
1957 #endif /* USE_SMMU_ARCH_MSM */
1958 
1959 	if (dhdpcie_info)
1960 		dhdpcie_detach(dhdpcie_info);
1961 	pci_disable_device(pdev);
1962 	if (osh)
1963 		osl_detach(osh);
1964 	if (adapter != NULL) {
1965 		adapter->bus_type = -1;
1966 		adapter->bus_num = -1;
1967 		adapter->slot_num = -1;
1968 	}
1969 
1970 	dhdpcie_init_succeeded = FALSE;
1971 
1972 	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1973 
1974 	return -1; /* return FAILURE  */
1975 }
1976 
1977 /* Free Linux irq */
1978 void
dhdpcie_free_irq(dhd_bus_t * bus)1979 dhdpcie_free_irq(dhd_bus_t *bus)
1980 {
1981 	struct pci_dev *pdev = NULL;
1982 
1983 	DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
1984 	if (bus) {
1985 		pdev = bus->dev;
1986 		if (bus->irq_registered) {
1987 #if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
1988 			/* clean up the affinity_hint before
1989 			 * the unregistration of PCIe irq
1990 			 */
1991 			(void)irq_set_affinity_hint(pdev->irq, NULL);
1992 #endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
1993 			free_irq(pdev->irq, bus);
1994 			bus->irq_registered = FALSE;
1995 			if (bus->d2h_intr_method == PCIE_MSI) {
1996 				dhdpcie_disable_msi(pdev);
1997 			}
1998 		} else {
1999 			DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
2000 		}
2001 	}
2002 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2003 	return;
2004 }
2005 
/*

Name:  dhdpcie_isr

Parameters:

1: IN int irq   -- interrupt vector
2: IN void *arg      -- handle to the private data structure (dhd_bus_t)

Return value:

irqreturn_t (always IRQ_HANDLED)

Description:
Interrupt Service Routine: checks the status register,
disables the interrupt and queues the DPC if mailbox interrupts are raised.
*/
2023 
2024 irqreturn_t
dhdpcie_isr(int irq,void * arg)2025 dhdpcie_isr(int irq, void *arg)
2026 {
2027 	dhd_bus_t *bus = (dhd_bus_t*)arg;
2028 	bus->isr_entry_time = OSL_LOCALTIME_NS();
2029 	if (!dhdpcie_bus_isr(bus)) {
2030 		DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
2031 	}
2032 	bus->isr_exit_time = OSL_LOCALTIME_NS();
2033 	return IRQ_HANDLED;
2034 }
2035 
2036 int
dhdpcie_disable_irq_nosync(dhd_bus_t * bus)2037 dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
2038 {
2039 	struct pci_dev *dev;
2040 	if ((bus == NULL) || (bus->dev == NULL)) {
2041 		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2042 		return BCME_ERROR;
2043 	}
2044 
2045 	dev = bus->dev;
2046 	disable_irq_nosync(dev->irq);
2047 	return BCME_OK;
2048 }
2049 
2050 int
dhdpcie_disable_irq(dhd_bus_t * bus)2051 dhdpcie_disable_irq(dhd_bus_t *bus)
2052 {
2053 	struct pci_dev *dev;
2054 	if ((bus == NULL) || (bus->dev == NULL)) {
2055 		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2056 		return BCME_ERROR;
2057 	}
2058 
2059 	dev = bus->dev;
2060 	disable_irq(dev->irq);
2061 	return BCME_OK;
2062 }
2063 
2064 int
dhdpcie_enable_irq(dhd_bus_t * bus)2065 dhdpcie_enable_irq(dhd_bus_t *bus)
2066 {
2067 	struct pci_dev *dev;
2068 	if ((bus == NULL) || (bus->dev == NULL)) {
2069 		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2070 		return BCME_ERROR;
2071 	}
2072 
2073 	dev = bus->dev;
2074 	enable_irq(dev->irq);
2075 	return BCME_OK;
2076 }
2077 
2078 int
dhdpcie_irq_disabled(dhd_bus_t * bus)2079 dhdpcie_irq_disabled(dhd_bus_t *bus)
2080 {
2081 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
2082 	struct irq_desc *desc = irq_to_desc(bus->dev->irq);
2083 	/* depth will be zero, if enabled */
2084 	return desc->depth;
2085 #else
2086 	/* return ERROR by default as there is no support for lower versions */
2087 	return BCME_ERROR;
2088 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2089 }
2090 
2091 int
dhdpcie_start_host_pcieclock(dhd_bus_t * bus)2092 dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
2093 {
2094 	int ret = 0;
2095 #ifdef CONFIG_ARCH_MSM
2096 #endif /* CONFIG_ARCH_MSM */
2097 	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2098 
2099 	if (bus == NULL) {
2100 		return BCME_ERROR;
2101 	}
2102 
2103 	if (bus->dev == NULL) {
2104 		return BCME_ERROR;
2105 	}
2106 
2107 #ifdef CONFIG_ARCH_MSM
2108 	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
2109 		bus->dev, NULL, 0);
2110 	if (ret) {
2111 		DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
2112 		goto done;
2113 	}
2114 
2115 done:
2116 #endif /* CONFIG_ARCH_MSM */
2117 	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
2118 	return ret;
2119 }
2120 
2121 int
dhdpcie_stop_host_pcieclock(dhd_bus_t * bus)2122 dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
2123 {
2124 	int ret = 0;
2125 #ifdef CONFIG_ARCH_MSM
2126 #endif /* CONFIG_ARCH_MSM */
2127 
2128 	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2129 
2130 	if (bus == NULL) {
2131 		return BCME_ERROR;
2132 	}
2133 
2134 	if (bus->dev == NULL) {
2135 		return BCME_ERROR;
2136 	}
2137 
2138 #ifdef CONFIG_ARCH_MSM
2139 	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
2140 		bus->dev, NULL, 0);
2141 	if (ret) {
2142 		DHD_ERROR(("Failed to stop PCIe link\n"));
2143 		goto done;
2144 	}
2145 done:
2146 #endif /* CONFIG_ARCH_MSM */
2147 	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
2148 	return ret;
2149 }
2150 
2151 int
dhdpcie_disable_device(dhd_bus_t * bus)2152 dhdpcie_disable_device(dhd_bus_t *bus)
2153 {
2154 	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2155 
2156 	if (bus == NULL) {
2157 		return BCME_ERROR;
2158 	}
2159 
2160 	if (bus->dev == NULL) {
2161 		return BCME_ERROR;
2162 	}
2163 
2164 	if (pci_is_enabled(bus->dev))
2165 		pci_disable_device(bus->dev);
2166 
2167 	return 0;
2168 }
2169 
/*
 * Re-enable the PCI device after a link-down / suspend cycle:
 * reload the saved config space, sanity-check the vendor ID, then
 * pci_enable_device() + pci_set_master().
 * Returns 0 on success, BCME_ERROR (or pci_enable_device's error) on failure.
 */
int
dhdpcie_enable_device(dhd_bus_t *bus)
{
	int ret = BCME_ERROR;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	dhdpcie_info_t *pch;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

	DHD_TRACE(("%s Enter:\n", __FUNCTION__));

	if (bus == NULL) {
		return BCME_ERROR;
	}

	if (bus->dev == NULL) {
		return BCME_ERROR;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		return BCME_ERROR;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \
	KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
	/* Updated with pci_load_and_free_saved_state to compatible
	 * with Kernel version 3.14.0 to 3.18.41.
	 */
	/* On these kernels pci_load_saved_state() is not exported, so the saved
	 * state is consumed and immediately re-captured for the next cycle.
	 */
	pci_load_and_free_saved_state(bus->dev, &pch->default_state);
	pch->default_state = pci_store_saved_state(bus->dev);
#else
	pci_load_saved_state(bus->dev, pch->default_state);
#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */

	/* Check if Device ID is valid */
	if (bus->dev->state_saved) {
		uint32 vid, saved_vid;
		/* Compare the live vendor ID against the saved config space;
		 * a mismatch means the device changed or the link dropped.
		 */
		pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid);
		saved_vid = bus->dev->saved_config_space[PCI_CFG_VID];
		if (vid != saved_vid) {
			DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
				"Skip the bus init\n", __FUNCTION__, vid, saved_vid));
			bus->no_bus_init = TRUE;
			/* Check if the PCIe link is down */
			if (vid == (uint32)-1) {
				/* all-ones config read: classic PCIe link-down signature */
				bus->is_linkdown = 1;
			}
			return BCME_ERROR;
		}
	}

	pci_restore_state(bus->dev);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */

	ret = pci_enable_device(bus->dev);
	if (ret) {
		pci_disable_device(bus->dev);
	} else {
		pci_set_master(bus->dev);
	}

	return ret;
}
2234 
2235 int
dhdpcie_alloc_resource(dhd_bus_t * bus)2236 dhdpcie_alloc_resource(dhd_bus_t *bus)
2237 {
2238 	dhdpcie_info_t *dhdpcie_info;
2239 	phys_addr_t bar0_addr, bar1_addr;
2240 	ulong bar1_size;
2241 
2242 	do {
2243 		if (bus == NULL) {
2244 			DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2245 			break;
2246 		}
2247 
2248 		if (bus->dev == NULL) {
2249 			DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2250 			break;
2251 		}
2252 
2253 		dhdpcie_info = pci_get_drvdata(bus->dev);
2254 		if (dhdpcie_info == NULL) {
2255 			DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2256 			break;
2257 		}
2258 
2259 		bar0_addr = pci_resource_start(bus->dev, 0);	/* Bar-0 mapped address */
2260 		bar1_addr = pci_resource_start(bus->dev, 2);	/* Bar-1 mapped address */
2261 
2262 		/* read Bar-1 mapped memory range */
2263 		bar1_size = pci_resource_len(bus->dev, 2);
2264 
2265 		if ((bar1_size == 0) || (bar1_addr == 0)) {
2266 			printf("%s: BAR1 Not enabled for this device size(%ld),"
2267 				" addr(0x"PRINTF_RESOURCE")\n",
2268 				__FUNCTION__, bar1_size, bar1_addr);
2269 			break;
2270 		}
2271 
2272 		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
2273 		if (!dhdpcie_info->regs) {
2274 			DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
2275 			break;
2276 		}
2277 
2278 		bus->regs = dhdpcie_info->regs;
2279 		dhdpcie_info->bar1_size =
2280 			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
2281 		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
2282 		if (!dhdpcie_info->tcm) {
2283 			DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
2284 			REG_UNMAP(dhdpcie_info->regs);
2285 			bus->regs = NULL;
2286 			break;
2287 		}
2288 
2289 		bus->tcm = dhdpcie_info->tcm;
2290 
2291 		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
2292 			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
2293 		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
2294 			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
2295 
2296 		return 0;
2297 	} while (0);
2298 
2299 	return BCME_ERROR;
2300 }
2301 
2302 void
dhdpcie_free_resource(dhd_bus_t * bus)2303 dhdpcie_free_resource(dhd_bus_t *bus)
2304 {
2305 	dhdpcie_info_t *dhdpcie_info;
2306 
2307 	if (bus == NULL) {
2308 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2309 		return;
2310 	}
2311 
2312 	if (bus->dev == NULL) {
2313 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2314 		return;
2315 	}
2316 
2317 	dhdpcie_info = pci_get_drvdata(bus->dev);
2318 	if (dhdpcie_info == NULL) {
2319 		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2320 		return;
2321 	}
2322 
2323 	if (bus->regs) {
2324 		REG_UNMAP(dhdpcie_info->regs);
2325 		bus->regs = NULL;
2326 	}
2327 
2328 	if (bus->tcm) {
2329 		REG_UNMAP(dhdpcie_info->tcm);
2330 		bus->tcm = NULL;
2331 	}
2332 }
2333 
2334 int
dhdpcie_bus_request_irq(struct dhd_bus * bus)2335 dhdpcie_bus_request_irq(struct dhd_bus *bus)
2336 {
2337 	dhdpcie_info_t *dhdpcie_info;
2338 	int ret = 0;
2339 
2340 	if (bus == NULL) {
2341 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2342 		return BCME_ERROR;
2343 	}
2344 
2345 	if (bus->dev == NULL) {
2346 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2347 		return BCME_ERROR;
2348 	}
2349 
2350 	dhdpcie_info = pci_get_drvdata(bus->dev);
2351 	if (dhdpcie_info == NULL) {
2352 		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2353 		return BCME_ERROR;
2354 	}
2355 
2356 	if (bus->intr) {
2357 		/* Register interrupt callback, but mask it (not operational yet). */
2358 		DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
2359 		dhdpcie_bus_intr_disable(bus);
2360 		ret = dhdpcie_request_irq(dhdpcie_info);
2361 		if (ret) {
2362 			DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
2363 				__FUNCTION__, ret));
2364 			return ret;
2365 		}
2366 	}
2367 
2368 	return ret;
2369 }
2370 
2371 #ifdef BCMPCIE_OOB_HOST_WAKE
2372 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2373 extern int dhd_get_wlan_oob_gpio(void);
2374 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2375 
dhdpcie_get_oob_irq_level(void)2376 int dhdpcie_get_oob_irq_level(void)
2377 {
2378 	int gpio_level;
2379 
2380 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2381 	gpio_level = dhd_get_wlan_oob_gpio();
2382 #else
2383 	gpio_level = BCME_UNSUPPORTED;
2384 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2385 	return gpio_level;
2386 }
2387 
dhdpcie_get_oob_irq_status(struct dhd_bus * bus)2388 int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
2389 {
2390 	dhdpcie_info_t *pch;
2391 	dhdpcie_os_info_t *dhdpcie_osinfo;
2392 
2393 	if (bus == NULL) {
2394 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2395 		return 0;
2396 	}
2397 
2398 	if (bus->dev == NULL) {
2399 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2400 		return 0;
2401 	}
2402 
2403 	pch = pci_get_drvdata(bus->dev);
2404 	if (pch == NULL) {
2405 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2406 		return 0;
2407 	}
2408 
2409 	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2410 
2411 	return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
2412 }
2413 
dhdpcie_get_oob_irq_num(struct dhd_bus * bus)2414 int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
2415 {
2416 	dhdpcie_info_t *pch;
2417 	dhdpcie_os_info_t *dhdpcie_osinfo;
2418 
2419 	if (bus == NULL) {
2420 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2421 		return 0;
2422 	}
2423 
2424 	if (bus->dev == NULL) {
2425 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2426 		return 0;
2427 	}
2428 
2429 	pch = pci_get_drvdata(bus->dev);
2430 	if (pch == NULL) {
2431 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2432 		return 0;
2433 	}
2434 
2435 	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2436 
2437 	return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0;
2438 }
2439 
/*
 * Enable or disable the OOB host-wake interrupt under the oob spinlock.
 * No-op when the requested state already matches, or when no OOB irq
 * number has been assigned.
 */
void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
{
	unsigned long flags;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	/* Spinlock serializes state changes against the OOB isr context */
	spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
	if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
		(dhdpcie_osinfo->oob_irq_num > 0)) {
		if (enable) {
			enable_irq(dhdpcie_osinfo->oob_irq_num);
			/* Counters/timestamps kept for wake-irq debugging */
			bus->oob_intr_enable_count++;
			bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
		} else {
			/* nosync: safe to call from the OOB irq handler itself */
			disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
			bus->oob_intr_disable_count++;
			bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
		}
		dhdpcie_osinfo->oob_irq_enabled = enable;
	}
	spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
}
2479 
/*
 * OOB host-wake interrupt handler: masks itself (the irq is re-enabled
 * later via dhdpcie_oob_intr_set), records debug timestamps/counters,
 * and takes a timed wakelock while the bus is in a low-power state.
 */
static irqreturn_t wlan_oob_irq(int irq, void *data)
{
	dhd_bus_t *bus;
	unsigned long flags_bus;
	DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
	bus = (dhd_bus_t *)data;
	/* Self-mask: uses disable_irq_nosync internally, safe in irq context */
	dhdpcie_oob_intr_set(bus, FALSE);
	bus->last_oob_irq_time = OSL_LOCALTIME_NS();
	bus->oob_intr_count++;
#ifdef DHD_WAKE_STATUS
	{
		bcmpcie_set_get_wake(bus, 1);
	}
#endif /* DHD_WAKE_STATUS */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	dhd_bus_wakeup_work(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
	/* Hold wakelock if bus_low_power_state is
	 * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED
	 */
	if (bus->dhd->up && bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
	}
	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
	return IRQ_HANDLED;
}
2507 
/*
 * Register the OOB host-wake irq handler and (unless DISABLE_WOWLAN)
 * arm it as a system-wakeup source.
 * Returns 0 on success, -EINVAL/-EBUSY or the request_irq() error code.
 */
int dhdpcie_oob_intr_register(dhd_bus_t *bus)
{
	int err = 0;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	if (dhdpcie_osinfo->oob_irq_registered) {
		DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
		return -EBUSY;
	}

	/* oob_irq_num <= 0 means no OOB gpio was provisioned; treated as success */
	if (dhdpcie_osinfo->oob_irq_num > 0) {
		printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__,
			(int)dhdpcie_osinfo->oob_irq_num,
			(int)dhdpcie_osinfo->oob_irq_flags);
		err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
			dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
			bus);
		if (err) {
			DHD_ERROR(("%s: request_irq failed with %d\n",
				__FUNCTION__, err));
			return err;
		}
#if defined(DISABLE_WOWLAN)
		printf("%s: disable_irq_wake\n", __FUNCTION__);
		dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
#else
		/* Make the irq capable of waking the system from suspend */
		printf("%s: enable_irq_wake\n", __FUNCTION__);
		err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
		if (!err) {
			dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
		} else
			printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err);
#endif
		dhdpcie_osinfo->oob_irq_enabled = TRUE;
	}

	dhdpcie_osinfo->oob_irq_registered = TRUE;

	return 0;
}
2567 
/* Tear down the OOB host-wake irq: disarm wake capability, mask, then free. */
void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
{
	int err = 0;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *osinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return;
	}

	osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	if (!osinfo->oob_irq_registered) {
		DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
		return;
	}

	if (osinfo->oob_irq_num > 0) {
		/* Order matters: drop wake capability first, then mask, then free */
		if (osinfo->oob_irq_wake_enabled) {
			err = disable_irq_wake(osinfo->oob_irq_num);
			if (err == 0) {
				osinfo->oob_irq_wake_enabled = FALSE;
			}
		}
		if (osinfo->oob_irq_enabled) {
			disable_irq(osinfo->oob_irq_num);
			osinfo->oob_irq_enabled = FALSE;
		}
		free_irq(osinfo->oob_irq_num, bus);
	}
	osinfo->oob_irq_registered = FALSE;
}
2611 #endif /* BCMPCIE_OOB_HOST_WAKE */
2612 
/* Map a dhd bus handle to its underlying struct device (NULL if no pci_dev). */
struct device * dhd_bus_to_dev(dhd_bus_t *bus)
{
	struct pci_dev *pdev = bus->dev;

	return pdev ? &pdev->dev : NULL;
}
2623 
2624 #define KIRQ_PRINT_BUF_LEN 256
2625 
/*
 * Dump /proc/interrupts-style statistics for one irq via DHD_ERROR:
 * per-online-cpu counts, chip name, hwirq number, trigger type and
 * descriptor name, formatted into a local bcmstrbuf under desc->lock.
 */
void
dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
{
	unsigned long flags = 0;
	struct irq_desc *desc;
	int i;          /* cpu iterator */
	struct bcmstrbuf strbuf;
	char tmp_buf[KIRQ_PRINT_BUF_LEN];

	desc = irq_to_desc(irq_num);
	if (!desc) {
		DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
		return;
	}
	bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
	/* raw spinlock: irq_desc->lock is raw_spinlock_t on RT kernels */
	raw_spin_lock_irqsave(&desc->lock, flags);
	bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
	for_each_online_cpu(i)
		bcm_bprintf(&strbuf, "%10u ",
			desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->name)
			bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
		else
			bcm_bprintf(&strbuf, " %8s", "-");
	} else {
		bcm_bprintf(&strbuf, " %8s", "None");
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
	if (desc->irq_data.domain)
		bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif // endif
#endif /* LINUX VERSION > 3.1.0 */

	if (desc->name)
		bcm_bprintf(&strbuf, "-%-8s", desc->name);

	DHD_ERROR(("%s\n", strbuf.origbuf));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
2668 
2669 void
dhd_show_kirqstats(dhd_pub_t * dhd)2670 dhd_show_kirqstats(dhd_pub_t *dhd)
2671 {
2672 	unsigned int irq = -1;
2673 #ifdef BCMPCIE
2674 	dhdpcie_get_pcieirq(dhd->bus, &irq);
2675 #endif /* BCMPCIE */
2676 #ifdef BCMSDIO
2677 	irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
2678 #endif /* BCMSDIO */
2679 	if (irq != -1) {
2680 #ifdef BCMPCIE
2681 		DHD_ERROR(("DUMP data kernel irq stats : \n"));
2682 #endif /* BCMPCIE */
2683 #ifdef BCMSDIO
2684 		DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
2685 #endif /* BCMSDIO */
2686 		dhd_print_kirqstats(dhd, irq);
2687 	}
2688 #ifdef BCMPCIE_OOB_HOST_WAKE
2689 	irq = dhdpcie_get_oob_irq_num(dhd->bus);
2690 	if (irq) {
2691 		DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
2692 		dhd_print_kirqstats(dhd, irq);
2693 	}
2694 #endif /* BCMPCIE_OOB_HOST_WAKE */
2695 }
2696 
2697 #ifdef DHD_FW_COREDUMP
2698 int
dhd_dongle_mem_dump(void)2699 dhd_dongle_mem_dump(void)
2700 {
2701 	if (!g_dhd_bus) {
2702 		DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
2703 		return -ENODEV;
2704 	}
2705 
2706 	dhd_bus_dump_console_buffer(g_dhd_bus);
2707 	dhd_prot_debug_info_print(g_dhd_bus->dhd);
2708 
2709 	g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
2710 	g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
2711 
2712 	dhd_bus_mem_dump(g_dhd_bus->dhd);
2713 	return 0;
2714 }
2715 EXPORT_SYMBOL(dhd_dongle_mem_dump);
2716 #endif /* DHD_FW_COREDUMP */
2717 
2718 bool
dhd_bus_check_driver_up(void)2719 dhd_bus_check_driver_up(void)
2720 {
2721 	dhd_bus_t *bus;
2722 	dhd_pub_t *dhdp;
2723 	bool isup = FALSE;
2724 
2725 	bus = (dhd_bus_t *)g_dhd_bus;
2726 	if (!bus) {
2727 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2728 		return isup;
2729 	}
2730 
2731 	dhdp = bus->dhd;
2732 	if (dhdp) {
2733 		isup = dhdp->up;
2734 	}
2735 
2736 	return isup;
2737 }
2738 EXPORT_SYMBOL(dhd_bus_check_driver_up);
2739