/*
 * Linux DHD Bus Module for PCIE
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

/* include files */
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmdevs.h>
#include <bcmdevs_legacy.h>    /* need to still support chips no longer in trunk firmware */
#include <siutils.h>
#include <hndsoc.h>
#include <hndpmu.h>
#include <sbchipc.h>
#if defined(DHD_DEBUG)
#include <hnd_armtrap.h>
#include <hnd_cons.h>
#endif /* defined(DHD_DEBUG) */
#include <dngl_stats.h>
#include <pcie_core.h>
#include <dhd.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhdioctl.h>
#include <bcmmsgbuf.h>
#include <pcicfg.h>
#include <dhd_pcie.h>
#include <dhd_linux.h>
#ifdef OEM_ANDROID
#ifdef CONFIG_ARCH_MSM
#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
#include <linux/msm_pcie.h>
#else
#include <mach/msm_pcie.h>
#endif /* CONFIG_PCI_MSM || CONFIG_ARCH_MSM8996 */
#endif /* CONFIG_ARCH_MSM */
#endif /* OEM_ANDROID */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
	defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
	defined(CONFIG_SOC_EXYNOS1000) || defined(CONFIG_SOC_GS101)
#include <linux/exynos-pci-ctrl.h>
#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
	* CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
	* CONFIG_SOC_EXYNOS1000 || CONFIG_SOC_GS101
	*/

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#ifndef AUTO_SUSPEND_TIMEOUT
#define AUTO_SUSPEND_TIMEOUT 1000
#endif /* AUTO_SUSPEND_TIMEOUT */
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_PCIE_RUNTIMEPM
#define RPM_WAKE_UP_TIMEOUT 10000 /* ms */
#endif /* DHD_PCIE_RUNTIMEPM */

#include <linux/irq.h>
#ifdef USE_SMMU_ARCH_MSM
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#endif /* USE_SMMU_ARCH_MSM */
#include <dhd_config.h>

#ifdef PCIE_OOB
#include "ftdi_sio_external.h"
#endif /* PCIE_OOB */

#define PCI_CFG_RETRY 		10	/* PR15065: retry count for pci cfg accesses */
#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN 	24		/* Mem. filename length */

#ifdef PCIE_OOB
#define HOST_WAKE 4   /* GPIO_0 (HOST_WAKE) - Output from WLAN */
#define DEVICE_WAKE 5  /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
#define BIT_WL_REG_ON 6
#define BIT_BT_REG_ON 7

int gpio_handle_val = 0;
unsigned char gpio_port = 0;
unsigned char gpio_direction = 0;
#define OOB_PORT "ttyUSB0"
#endif /* PCIE_OOB */

#ifndef BCMPCI_DEV_ID
#define BCMPCI_DEV_ID PCI_ANY_ID
#endif

#ifdef FORCE_TPOWERON
extern uint32 tpoweron_scale;
#endif /* FORCE_TPOWERON */
/* user defined data structures */

typedef bool (*dhdpcie_cb_fn_t)(void *);

typedef struct dhdpcie_info
{
	dhd_bus_t	*bus;
	osl_t		*osh;
	struct pci_dev  *dev;		/* pci device handle */
	volatile char	*regs;		/* pci device memory va */
	volatile char	*tcm;		/* pci device memory va */
	uint32		bar1_size;	/* pci device memory size */
	struct pcos_info *pcos_info;
	uint16		last_intrstatus;	/* to cache intrstatus */
	int	irq;
	char pciname[32];
	struct pci_saved_state* default_state;
	struct pci_saved_state* state;
#ifdef BCMPCIE_OOB_HOST_WAKE
	void *os_cxt;			/* Pointer to per-OS private data */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_WAKE_STATUS
	spinlock_t	pkt_wake_lock;
	unsigned int	total_wake_count;
	int		pkt_wake;
	int		wake_irq;
#endif /* DHD_WAKE_STATUS */
#ifdef USE_SMMU_ARCH_MSM
	void *smmu_cxt;
#endif /* USE_SMMU_ARCH_MSM */
} dhdpcie_info_t;

struct pcos_info {
	dhdpcie_info_t *pc;
	spinlock_t lock;
	wait_queue_head_t intr_wait_queue;
	timer_list_compat_t tuning_timer;
	int tuning_timer_exp;
	atomic_t timer_enab;
	struct tasklet_struct tuning_tasklet;
};

#ifdef BCMPCIE_OOB_HOST_WAKE
typedef struct dhdpcie_os_info {
	int			oob_irq_num;	/* valid when hardware or software oob in use */
	unsigned long		oob_irq_flags;	/* valid when hardware or software oob in use */
	bool			oob_irq_registered;
	bool			oob_irq_enabled;
	bool			oob_irq_wake_enabled;
	spinlock_t		oob_irq_spinlock;
	void			*dev;		/* handle to the underlying device */
} dhdpcie_os_info_t;
static irqreturn_t wlan_oob_irq(int irq, void *data);
#ifdef CUSTOMER_HW2
extern struct brcm_pcie_wake brcm_pcie_wake;
#endif /* CUSTOMER_HW2 */
#endif /* BCMPCIE_OOB_HOST_WAKE */

#ifdef USE_SMMU_ARCH_MSM
typedef struct dhdpcie_smmu_info {
	struct dma_iommu_mapping *smmu_mapping;
	dma_addr_t smmu_iova_start;
	size_t smmu_iova_len;
} dhdpcie_smmu_info_t;
#endif /* USE_SMMU_ARCH_MSM */

/* function declarations */
static int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit
dhdpcie_pci_remove(struct pci_dev *pdev);
static int dhdpcie_init(struct pci_dev *pdev);
static irqreturn_t dhdpcie_isr(int irq, void *arg);
/* OS Routine functions for PCI suspend/resume */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint);
#else
static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
static int dhdpcie_resume_dev(struct pci_dev *dev);
static int dhdpcie_suspend_dev(struct pci_dev *dev);
#ifdef DHD_PCIE_RUNTIMEPM
static int dhdpcie_pm_suspend(struct device *dev);
static int dhdpcie_pm_prepare(struct device *dev);
static int dhdpcie_pm_resume(struct device *dev);
static void dhdpcie_pm_complete(struct device *dev);
#else
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int dhdpcie_pm_system_suspend_noirq(struct device *dev);
static int dhdpcie_pm_system_resume_noirq(struct device *dev);
#else
static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
static int dhdpcie_pci_resume(struct pci_dev *dev);
#if defined(BT_OVER_PCIE)
static int dhdpcie_pci_resume_early(struct pci_dev *dev);
#endif /* BT_OVER_PCIE */
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#endif /* DHD_PCIE_RUNTIMEPM */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int dhdpcie_pm_runtime_suspend(struct device *dev);
static int dhdpcie_pm_runtime_resume(struct device *dev);
static int dhdpcie_pm_system_suspend_noirq(struct device *dev);
static int dhdpcie_pm_system_resume_noirq(struct device *dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef SUPPORT_EXYNOS7420
void exynos_pcie_pm_suspend(int ch_num) {}
void exynos_pcie_pm_resume(int ch_num) {}
#endif /* SUPPORT_EXYNOS7420 */

static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);

uint32
dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval);

static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
	{ vendor: VENDOR_BROADCOM,
	device: BCMPCI_DEV_ID,
	subvendor: PCI_ANY_ID,
	subdevice: PCI_ANY_ID,
	class: PCI_CLASS_NETWORK_OTHER << 8,
	class_mask: 0xffff00,
	driver_data: 0,
	},
	{ 0, 0, 0, 0, 0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);

/* Power Management Hooks */
#ifdef DHD_PCIE_RUNTIMEPM
static const struct dev_pm_ops dhd_pcie_pm_ops = {
	.prepare = dhdpcie_pm_prepare,
	.suspend = dhdpcie_pm_suspend,
	.resume = dhdpcie_pm_resume,
	.complete = dhdpcie_pm_complete,
};
#endif /* DHD_PCIE_RUNTIMEPM */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static const struct dev_pm_ops dhdpcie_pm_ops = {
	SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL)
	.suspend_noirq = dhdpcie_pm_system_suspend_noirq,
	.resume_noirq = dhdpcie_pm_system_resume_noirq
};
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

static struct pci_driver dhdpcie_driver = {
	node:		{&dhdpcie_driver.node, &dhdpcie_driver.node},
	name:		"pcieh",
	id_table:	dhdpcie_pci_devid,
	probe:		dhdpcie_pci_probe,
	remove:		dhdpcie_pci_remove,
#if defined(DHD_PCIE_RUNTIMEPM) || defined(DHD_PCIE_NATIVE_RUNTIMEPM)
	.driver.pm = &dhd_pcie_pm_ops,
#else
	suspend:	dhdpcie_pci_suspend,
	resume:		dhdpcie_pci_resume,
#if defined(BT_OVER_PCIE)
	resume_early: dhdpcie_pci_resume_early,
#endif /* BT_OVER_PCIE */
#endif /* DHD_PCIE_RUNTIMEPM || DHD_PCIE_NATIVE_RUNTIMEPM */
};

int dhdpcie_init_succeeded = FALSE;

#ifdef USE_SMMU_ARCH_MSM
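/*
 * Create an IOMMU mapping for the WLAN PCIe device using the IOVA range
 * published by the "android,bcmdhd_wlan" device-tree node, then attach
 * the device to it. Returns 0 on success or a negative errno.
 */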
static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
{
	struct dma_iommu_mapping *mapping;
	struct device_node *root_node = NULL;
	dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
	int smmu_iova_address[2];
	char *wlan_node = "android,bcmdhd_wlan";
	char *wlan_smmu_node = "wlan-smmu-iova-address";
	int atomic_ctx = 1;
	int s1_bypass = 1;
	int ret = 0;

	DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));

	root_node = of_find_compatible_node(NULL, NULL, wlan_node);
	if (!root_node) {
		WARN(1, "failed to get device node of BRCM WLAN\n");
		return -ENODEV;
	}

	if (of_property_read_u32_array(root_node, wlan_smmu_node,
		smmu_iova_address, 2) == 0) {
		DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
			__FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
		smmu_info->smmu_iova_start = smmu_iova_address[0];
		smmu_info->smmu_iova_len = smmu_iova_address[1];
	} else {
		printf("%s : can't get smmu iova address property\n",
			__FUNCTION__);
		return -ENODEV;
	}

	if (smmu_info->smmu_iova_len <= 0) {
		DHD_ERROR(("%s: Invalid smmu iova len %d\n",
			__FUNCTION__, (int)smmu_info->smmu_iova_len));
		return -EINVAL;
	}

	DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
		return -EINVAL;
	}

	mapping = arm_iommu_create_mapping(&platform_bus_type,
		smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
	if (IS_ERR(mapping)) {
		/* fetch the error code before logging it */
		ret = PTR_ERR(mapping);
		DHD_ERROR(("%s: create mapping failed, err = %d\n",
			__FUNCTION__, ret));
		goto map_fail;
	}

	ret = iommu_domain_set_attr(mapping->domain,
		DOMAIN_ATTR_ATOMIC, &atomic_ctx);
	if (ret) {
		DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
			__FUNCTION__, ret));
		goto set_attr_fail;
	}

	ret = iommu_domain_set_attr(mapping->domain,
		DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
	if (ret < 0) {
		DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
			__FUNCTION__, ret));
		goto set_attr_fail;
	}

	ret = arm_iommu_attach_device(&pdev->dev, mapping);
	if (ret) {
		DHD_ERROR(("%s: attach device failed, err = %d\n",
			__FUNCTION__, ret));
		goto attach_fail;
	}

	smmu_info->smmu_mapping = mapping;

	return ret;

attach_fail:
set_attr_fail:
	arm_iommu_release_mapping(mapping);
map_fail:
	return ret;
}

static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
{
	dhdpcie_smmu_info_t *smmu_info;

	if (!smmu_cxt) {
		return;
	}

	smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
	if (smmu_info->smmu_mapping) {
		arm_iommu_detach_device(&pdev->dev);
		arm_iommu_release_mapping(smmu_info->smmu_mapping);
		smmu_info->smmu_mapping = NULL;
	}
}
#endif /* USE_SMMU_ARCH_MSM */

#ifdef FORCE_TPOWERON
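/*
 * tPowerOn is the L1SS Control 2 parameter governing how long the link
 * partners wait for a stable reference clock when exiting L1.2; these
 * helpers read back and override it on both the root complex and the
 * endpoint.
 */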
static void
dhd_bus_get_tpoweron(dhd_bus_t *bus)
{
	uint32 tpoweron_rc;
	uint32 tpoweron_ep;

	tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
	tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
	DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n",
		__FUNCTION__, tpoweron_rc, tpoweron_ep));
}

static void
dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
{
	dhd_bus_get_tpoweron(bus);
	/* Set the tpoweron */
	DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
	dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
	dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);

	dhd_bus_get_tpoweron(bus);
}

static bool
dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
{
	/*
	 * On Fire's reference platform, coming out of L1.2,
	 * there is a constant delay of 45us between CLKREQ# and stable REFCLK.
	 * Due to this delay, with tPowerOn < 50us,
	 * there is a chance of the refclk sense triggering on noise.
	 *
	 * Whichever chip needs a forced tPowerOn of 50us should be listed below.
	 */
	if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
		return TRUE;
	}
	return FALSE;
}
#endif /* FORCE_TPOWERON */

#ifdef BT_OVER_PCIE
int dhd_bus_pwr_off(dhd_pub_t *dhdp, int reason)
{
	DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
			__FUNCTION__, __FILE__));
	return BCME_OK;
}

int dhd_bus_pwr_on(dhd_pub_t *dhdp, int reason)
{
	DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
			__FUNCTION__, __FILE__));
	return BCME_OK;
}

int dhd_bus_pwr_toggle(dhd_pub_t *dhdp, int reason)
{
	DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
			__FUNCTION__, __FILE__));
	return BCME_OK;
}

bool dhdpcie_is_btop_chip(struct dhd_bus *bus)
{
	DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
			__FUNCTION__, __FILE__));
	return FALSE;
}

int dhdpcie_redownload_fw(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
			__FUNCTION__, __FILE__));
	return BCME_OK;
}
#endif /* BT_OVER_PCIE */

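/*
 * Toggle ASPM for a single device (RC or EP) through the Link Control
 * register of the PCIe capability: enable turns on L1 only, disable
 * clears both L0s and L1. Returns TRUE if the register was changed,
 * FALSE if the requested state was already in effect.
 */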
static bool
dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
{
	uint32 linkctrl_before;
	uint32 linkctrl_after = 0;
	uint8 linkctrl_asm;
	char *device;

	device = (dev == bus->dev) ? "EP" : "RC";

	linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
		FALSE, FALSE, 0);
	linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK);

	if (enable) {
		if (linkctrl_asm == PCIE_ASPM_L1_ENAB) {
			DHD_ERROR(("%s: %s already enabled  linkctrl: 0x%x\n",
				__FUNCTION__, device, linkctrl_before));
			return FALSE;
		}
		/* Enable only L1 ASPM (bit 1) */
		dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
			TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB));
	} else {
		if (linkctrl_asm == 0) {
			DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n",
				__FUNCTION__, device, linkctrl_before));
			return FALSE;
		}
		/* Disable complete ASPM (bit 1 and bit 0) */
		dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
			TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB)));
	}

	linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
		FALSE, FALSE, 0);
	DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
		__FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
		linkctrl_before, linkctrl_after));

	return TRUE;
}

static bool
dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
{
	uint32 rc_aspm_cap;
	uint32 ep_aspm_cap;

	/* RC ASPM capability */
	rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
		FALSE, FALSE, 0);
	if (rc_aspm_cap == BCME_ERROR) {
		DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
		return FALSE;
	}

	/* EP ASPM capability */
	ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
		FALSE, FALSE, 0);
	if (ep_aspm_cap == BCME_ERROR) {
		DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
		return FALSE;
	}

	return TRUE;
}

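/*
 * Enable or disable ASPM on the RC/EP pair. On enable the RC is
 * programmed before the EP; on disable the EP is programmed before the
 * RC. The return value reflects the last device programmed and is used
 * by dhd_bus_l1ss_enable_rc_ep() to decide whether ASPM needs to be
 * re-enabled afterwards.
 */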
bool
dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
{
	bool ret;

	if (!bus->rc_ep_aspm_cap) {
		DHD_ERROR(("%s: NOT ASPM  CAPABLE rc_ep_aspm_cap: %d\n",
			__FUNCTION__, bus->rc_ep_aspm_cap));
		return FALSE;
	}

	if (enable) {
		/* Enable only L1 ASPM: first RC then EP */
		ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
		ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
	} else {
		/* Disable complete ASPM: first EP then RC */
		ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
		ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
	}

	return ret;
}

static void
dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
{
	uint32 l1ssctrl_before;
	uint32 l1ssctrl_after = 0;
	uint8 l1ss_ep;
	char *device;

	device = (dev == bus->dev) ? "EP" : "RC";

	/* Extended Capability Reg */
	l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK);

	if (enable) {
		if (l1ss_ep == PCIE_EXT_L1SS_ENAB) {
			DHD_ERROR(("%s: %s already enabled,  l1ssctrl: 0x%x\n",
				__FUNCTION__, device, l1ssctrl_before));
			return;
		}
		dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
			TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB));
	} else {
		if (l1ss_ep == 0) {
			DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n",
				__FUNCTION__, device, l1ssctrl_before));
			return;
		}
		dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
			TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB)));
	}
	l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
		__FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
		l1ssctrl_before, l1ssctrl_after));
}

static bool
dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus)
{
	uint32 rc_l1ss_cap;
	uint32 ep_l1ss_cap;

	/* RC Extended Capability */
	rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	if (rc_l1ss_cap == BCME_ERROR) {
		DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__));
		return FALSE;
	}

	/* EP Extended Capability */
	ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	if (ep_l1ss_cap == BCME_ERROR) {
		DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__));
		return FALSE;
	}

	return TRUE;
}

void
dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
{
	bool ret;

	if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) {
		DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
			__FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
		return;
	}

	/* Disable ASPM of RC and EP */
	ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE);

	if (enable) {
		/* Enable RC then EP */
		dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
		dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
	} else {
		/* Disable EP then RC */
		dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
		dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
	}

	/* Re-enable ASPM of RC and EP only if this API disabled it above */
	if (ret == TRUE) {
		dhd_bus_aspm_enable_rc_ep(bus, TRUE);
	}
}

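/*
 * Clear the CORR_ERR_AE bit in the AER correctable-error mask register
 * of both the endpoint and the root complex, so that those correctable
 * errors are no longer masked.
 */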
void
dhd_bus_aer_config(dhd_bus_t *bus)
{
	uint32 val;

	DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
	val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
		PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
	if (val != (uint32)-1) {
		val &= ~CORR_ERR_AE;
		dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
			PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
	} else {
		DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
			__FUNCTION__, val));
	}

	DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
	val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
		PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
	if (val != (uint32)-1) {
		val &= ~CORR_ERR_AE;
		dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
			PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
	} else {
		DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
			__FUNCTION__, val));
	}
}

#ifdef DHD_PCIE_RUNTIMEPM
static int dhdpcie_pm_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	unsigned long flags;

	if (pch) {
		bus = pch->bus;
	}
	if (!bus) {
		return ret;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
		DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
			__FUNCTION__, bus->dhd->dhd_bus_busy_state));
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		return -EBUSY;
	}
	DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	if (bus->dhd->up)
		ret = dhdpcie_set_suspend_resume(bus, TRUE);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return ret;
}

static int dhdpcie_pm_prepare(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;

	if (!pch || !pch->bus) {
		return 0;
	}

	bus = pch->bus;
	DHD_DISABLE_RUNTIME_PM(bus->dhd);
	bus->chk_pm = TRUE;

	return 0;
}

static int dhdpcie_pm_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	unsigned long flags;

	if (pch) {
		bus = pch->bus;
	}
	if (!bus) {
		return ret;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	if (bus->dhd->up)
		ret = dhdpcie_set_suspend_resume(bus, FALSE);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return ret;
}

static void dhdpcie_pm_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;

	if (!pch || !pch->bus) {
		return;
	}

	bus = pch->bus;
	DHD_ENABLE_RUNTIME_PM(bus->dhd);
	bus->chk_pm = FALSE;

	return;
}
#else
static int dhdpcie_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int ret = 0;
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	unsigned long flags;
	uint32 i = 0;

	if (pch) {
		bus = pch->bus;
	}
	if (!bus) {
		return ret;
	}

	BCM_REFERENCE(state);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
		DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
			__FUNCTION__, bus->dhd->dhd_bus_busy_state));

		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		OSL_DELAY(1000);
		/* retry till the transaction is complete */
		while (i < 100) {
			OSL_DELAY(1000);
			i++;

			DHD_GENERAL_LOCK(bus->dhd, flags);
			if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
				DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n",
					__FUNCTION__, i));
				break;
			}
			if (i != 100) {
				DHD_GENERAL_UNLOCK(bus->dhd, flags);
			}
		}
		if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
			DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, "
				"dhd_bus_busy_state = 0x%x\n",
				__FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
			return -EBUSY;
		}
	}
	DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_CFG80211_SUSPEND_RESUME
	dhd_cfg80211_suspend(bus->dhd);
#endif /* DHD_CFG80211_SUSPEND_RESUME */

	if (!bus->dhd->dongle_reset)
		ret = dhdpcie_set_suspend_resume(bus, TRUE);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return ret;
}

#if defined(BT_OVER_PCIE)
static int dhdpcie_pci_resume_early(struct pci_dev *pdev)
{
	int ret = 0;
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	uint32 pmcsr;

	if (pch) {
		bus = pch->bus;
	}
	if (!bus) {
		return ret;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9))
	/* On fc30 (linux ver 5.0.9),
	 * PMEStat of PMCSR (cfg reg) is cleared by the kernel before this callback.
	 * So we use SwPme of FunctionControl (enum reg) instead of PMEStat,
	 * which requires no kernel change.
	 */
	if (bus->sih->buscorerev >= 64) {
		uint32 ftnctrl;
		volatile void *regsva = (volatile void *)bus->regs;

		ftnctrl = pcie_corereg(bus->osh, regsva,
				OFFSETOF(sbpcieregs_t, ftn_ctrl.control), 0, 0);
		pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));

		DHD_ERROR(("%s(): pmcsr is 0x%x, ftnctrl is 0x%8x \r\n",
			__FUNCTION__, pmcsr, ftnctrl));
		if (ftnctrl & PCIE_FTN_SWPME_MASK) {
			DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
		}
	} else
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9)) */
	{
		pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));

		DHD_ERROR(("%s(): pmcsr is 0x%x \r\n", __FUNCTION__, pmcsr));
		if (pmcsr & PCIE_PMCSR_PMESTAT) {
			DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
		}
	}

	/*
	 * TODO: Add code to take advantage of what is read from pmcsr
	 */

	return ret;
}
#endif /* BT_OVER_PCIE */

static int dhdpcie_pci_resume(struct pci_dev *pdev)
{
	int ret = 0;
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	unsigned long flags;

	if (pch) {
		bus = pch->bus;
	}
	if (!bus) {
		return ret;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	if (!bus->dhd->dongle_reset)
		ret = dhdpcie_set_suspend_resume(bus, FALSE);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_CFG80211_SUSPEND_RESUME
	dhd_cfg80211_resume(bus->dhd);
#endif /* DHD_CFG80211_SUSPEND_RESUME */
	return ret;
}

#endif /* DHD_PCIE_RUNTIMEPM */

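/*
 * Common suspend/resume worker: when firmware is not loaded (bus is
 * down), only the PCI bus itself is suspended/resumed; otherwise the
 * full dongle suspend path in dhdpcie_bus_suspend() is taken.
 */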
static int
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
#else
dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
{
	int ret = 0;

	ASSERT(bus && !bus->dhd->dongle_reset);

#ifdef DHD_PCIE_RUNTIMEPM
	/* if a wakelock is held during suspend, return failure */
	if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
		return -EBUSY;
	}
	mutex_lock(&bus->pm_lock);
#endif /* DHD_PCIE_RUNTIMEPM */

	/* When firmware is not loaded, do the PCI bus suspend/resume only */
	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		ret = dhdpcie_pci_suspend_resume(bus, state);
#ifdef DHD_PCIE_RUNTIMEPM
		mutex_unlock(&bus->pm_lock);
#endif /* DHD_PCIE_RUNTIMEPM */
		return ret;
	}
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	ret = dhdpcie_bus_suspend(bus, state, byint);
#else
	ret = dhdpcie_bus_suspend(bus, state);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
	if (ret == BCME_OK) {
		/*
		 * net.ipv4.tcp_limit_output_bytes is used for all ipv4 sockets,
		 * so return it to its original value when there is no traffic (suspend)
		 */
		if (state == TRUE) {
			dhd_ctrl_tcp_limit_output_bytes(0);
		} else {
			dhd_ctrl_tcp_limit_output_bytes(1);
		}
	}
#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */

#ifdef DHD_PCIE_RUNTIMEPM
	mutex_unlock(&bus->pm_lock);
#endif /* DHD_PCIE_RUNTIMEPM */

	return ret;
}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int dhdpcie_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	int ret = 0;

	if (!pch)
		return -EBUSY;

	bus = pch->bus;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	if (atomic_read(&bus->dhd->block_bus))
		return -EHOSTDOWN;

	dhd_netif_stop_queue(bus);
	atomic_set(&bus->dhd->block_bus, TRUE);

	if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) {
		pm_runtime_mark_last_busy(dev);
		ret = -EAGAIN;
	}

	atomic_set(&bus->dhd->block_bus, FALSE);
	dhd_bus_start_queue(bus);

	return ret;
}

static int dhdpcie_pm_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = pch->bus;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	if (atomic_read(&bus->dhd->block_bus))
		return -EHOSTDOWN;

	if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE))
		return -EAGAIN;

	return 0;
}

static int dhdpcie_pm_system_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	int ret;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	if (!pch)
		return -EBUSY;

	bus = pch->bus;

	if (atomic_read(&bus->dhd->block_bus))
		return -EHOSTDOWN;

	dhd_netif_stop_queue(bus);
	atomic_set(&bus->dhd->block_bus, TRUE);

	ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE);

	if (ret) {
		dhd_bus_start_queue(bus);
		atomic_set(&bus->dhd->block_bus, FALSE);
	}

	return ret;
}

static int dhdpcie_pm_system_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = NULL;
	int ret;

	if (!pch)
		return -EBUSY;

	bus = pch->bus;

	DHD_RPM(("%s Enter\n", __FUNCTION__));

	ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE);

	atomic_set(&bus->dhd->block_bus, FALSE);
	dhd_bus_start_queue(bus);
	pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));

	return ret;
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

static void
dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state)
{
	DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
		"BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x "
		"PCI_BAR1_WIN(0x%x)=(0x%x)\n",
		suspend_state,
		PCIECFGREG_BASEADDR0,
		dhd_pcie_config_read(bus,
			PCIECFGREG_BASEADDR0, sizeof(uint32)),
		PCIECFGREG_BASEADDR1,
		dhd_pcie_config_read(bus,
			PCIECFGREG_BASEADDR1, sizeof(uint32)),
		PCIE_CFG_PMCSR,
		dhd_pcie_config_read(bus,
			PCIE_CFG_PMCSR, sizeof(uint32)),
		PCI_BAR1_WIN,
		dhd_pcie_config_read(bus,
			PCI_BAR1_WIN, sizeof(uint32))));
}

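/*
 * Endpoint suspend sequence: disable L1SS on Exynos hosts, kill the DPC
 * tasklet, save PCI config space, enable wake, disable the device and
 * drop it into D3hot.
 */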
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
	int ret;
	dhdpcie_info_t *pch = pci_get_drvdata(dev);
	dhd_bus_t *bus = pch->bus;

#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	DHD_ERROR(("%s: Enter\n", __FUNCTION__));
#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
	defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
	defined(CONFIG_SOC_EXYNOS1000)
	DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
	exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI);
#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
	* CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
	* CONFIG_SOC_EXYNOS1000
	*/
#if defined(CONFIG_SOC_GS101)
	DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
	exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, 1);
#endif /* CONFIG_SOC_GS101 */

	dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	dhd_dpc_tasklet_kill(bus->dhd);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	pci_save_state(dev);
#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	pch->state = pci_store_saved_state(dev);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	pci_enable_wake(dev, PCI_D0, TRUE);
	if (pci_is_enabled(dev))
		pci_disable_device(dev);

	ret = pci_set_power_state(dev, PCI_D3hot);
	if (ret) {
		DHD_ERROR(("%s: pci_set_power_state error %d\n",
			__FUNCTION__, ret));
	}
#ifdef OEM_ANDROID
//	dev->state_saved = FALSE;
#endif /* OEM_ANDROID */
	dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
	return ret;
}

#ifdef DHD_WAKE_STATUS
int bcmpcie_get_total_wake(struct dhd_bus *bus)
{
	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);

	return pch->total_wake_count;
}

int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
{
	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
	unsigned long flags;
	int ret;

	DHD_PKT_WAKE_LOCK(&pch->pkt_wake_lock, flags);

	ret = pch->pkt_wake;
	pch->total_wake_count += flag;
	pch->pkt_wake = flag;

	DHD_PKT_WAKE_UNLOCK(&pch->pkt_wake_lock, flags);
	return ret;
}
#endif /* DHD_WAKE_STATUS */

static int dhdpcie_resume_dev(struct pci_dev *dev)
{
	int err = 0;
	dhdpcie_info_t *pch = pci_get_drvdata(dev);
#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	pci_load_and_free_saved_state(dev, &pch->state);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	DHD_ERROR(("%s: Enter\n", __FUNCTION__));
#ifdef OEM_ANDROID
//	dev->state_saved = TRUE;
#endif /* OEM_ANDROID */
	pci_restore_state(dev);

	/* Restore the current BAR1 window */
	OSL_PCI_WRITE_CONFIG(pch->bus->osh, PCI_BAR1_WIN, 4, pch->bus->curr_bar1_win);

#ifdef FORCE_TPOWERON
	if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
		dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
	}
#endif /* FORCE_TPOWERON */
	err = pci_enable_device(dev);
	if (err) {
		printf("%s: pci_enable_device error %d \n", __FUNCTION__, err);
		goto out;
	}
	pci_set_master(dev);
	err = pci_set_power_state(dev, PCI_D0);
	if (err) {
		printf("%s: pci_set_power_state error %d \n", __FUNCTION__, err);
		goto out;
	}
	BCM_REFERENCE(pch);
	dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
	defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
	defined(CONFIG_SOC_EXYNOS1000)
	DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
	exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
	* CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
	* CONFIG_SOC_EXYNOS1000
	*/
#if defined(CONFIG_SOC_GS101)
	DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
	exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, 1);
#endif /* CONFIG_SOC_GS101 */

out:
	return err;
}

static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
{
	int bcmerror = 0;

	bcmerror = dhdpcie_start_host_dev(bus);
	if (bcmerror < 0) {
		DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
			__FUNCTION__, bcmerror));
		bus->is_linkdown = 1;
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
		bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	}

	return bcmerror;
}

static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
{
	int bcmerror = 0;
#ifdef CONFIG_ARCH_EXYNOS
	/*
	 * XXX : SWWLAN-82173, SWWLAN-82183 WAR for SS PCIe RC
	 * SS PCIe RC/EP is a 1-to-1 mapping using different channels
	 * (RC0 - LTE, RC1 - WiFi); RC0 and RC1 work independently
	 */

	if (bus->rc_dev) {
		pci_save_state(bus->rc_dev);
	} else {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
	}
#endif /* CONFIG_ARCH_EXYNOS */
	bcmerror = dhdpcie_stop_host_dev(bus);
	return bcmerror;
}

int
dhdpcie_set_master_and_d0_pwrstate(dhd_bus_t *bus)
{
	int err;
	pci_set_master(bus->dev);
	err = pci_set_power_state(bus->dev, PCI_D0);
	if (err) {
		DHD_ERROR(("%s: pci_set_power_state error %d \n", __FUNCTION__, err));
	}
	return err;
}

uint32
dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
{
	uint val = -1; /* Initialise to 0xffffffff */
	if (bus->rc_dev) {
		pci_read_config_dword(bus->rc_dev, offset, &val);
		OSL_DELAY(100);
	} else {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
	}
	DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
		__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
	return (val);
}

/*
 * Reads/writes the value of a capability register
 * from the given CAP_ID section of the PCI device
 *
 * Arguments
 * @pdev PCI device pointer
 * @cap Capability or Extended Capability ID to get
 * @offset offset of Register to Read
 * @is_ext TRUE if @cap is given for Extended Capability
 * @is_write is set to TRUE to indicate write
 * @writeval value to write
 *
 * Return Value
 * Returns 0xffffffff on error
 * on write success returns BCME_OK (0)
 * on Read Success returns the value of the register requested
 * Note: caller should ensure a valid capability ID and Ext. Capability ID.
 */

uint32
dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval)
{
	int cap_ptr = 0;
	uint32 ret = -1;
	uint32 readval;

	if (!(pdev)) {
		DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
		return ret;
	}

	/* Find Capability offset */
	if (is_ext) {
		/* removing the max EXT_CAP_ID check as
		 * the linux kernel definition's max value is not yet updated per the spec
		 */
		cap_ptr = pci_find_ext_capability(pdev, cap);
	} else {
		/* removing the max PCI_CAP_ID_MAX check as
		 * previous kernel versions don't have this definition
		 */
		cap_ptr = pci_find_capability(pdev, cap);
	}

	/* Return if capability with given ID not found */
	if (cap_ptr == 0) {
		DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
			__FUNCTION__, cap));
		return BCME_ERROR;
	}

	if (is_write) {
		pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
		ret = BCME_OK;
	} else {
		pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
		ret = readval;
	}

	return ret;
}

uint32
dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval)
{
	if (!(bus->rc_dev)) {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
		return BCME_ERROR;
	}

	return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
}

uint32
dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval)
{
	if (!(bus->dev)) {
		DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
}

/* API wrapper to read the Root Port link capability.
 * Returns 2 = GEN2, 1 = GEN1, BCME_ERROR if the link capability is not found
 */
uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
{
	uint32 linkcap = -1;
	linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
			PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
	linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
	return linkcap;
}

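/*
 * For chips with an ARM CA7 core, save the BAR coherent-access enable
 * bits of PCIE_CFG_SUBSYSTEM_CONTROL at suspend (state == TRUE) and
 * restore them at resume, since config space may be reset while in D3.
 */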
static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
{
	if (bus->coreid == ARMCA7_CORE_ID) {
		if (state) {
			/* Sleep */
			bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus,
				PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK;
		} else {
			uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL,
				4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
			dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val);
		}
	}
}

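/*
 * Suspend order: EP first, then the host RC. Resume order: host RC
 * first, then the EP; CTO configuration and the enumeration-reset WAR
 * are re-applied afterwards because config space got reset at D3 (PERST).
 */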
int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
	int rc;

	struct pci_dev *dev = bus->dev;

	if (state) {
		dhdpcie_config_save_restore_coherent(bus, state);
#if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
		dhdpcie_pme_active(bus->osh, state);
#endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
		rc = dhdpcie_suspend_dev(dev);
		if (!rc) {
			dhdpcie_suspend_host_dev(bus);
		}
	} else {
		rc = dhdpcie_resume_host_dev(bus);
		if (!rc) {
			rc = dhdpcie_resume_dev(dev);
			if (PCIECTO_ENAB(bus)) {
				/* reinit CTO configuration
				 * because cfg space got reset at D3 (PERST)
				 */
				dhdpcie_cto_cfg_init(bus, TRUE);
			}
			if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
				dhdpcie_ssreset_dis_enum_rst(bus);
			}
#if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
			dhdpcie_pme_active(bus->osh, state);
#endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
		}
		dhdpcie_config_save_restore_coherent(bus, state);
#if defined(OEM_ANDROID)
#if defined(DHD_HANG_SEND_UP_TEST)
		if (bus->is_linkdown ||
			bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL)
#else /* DHD_HANG_SEND_UP_TEST */
		if (bus->is_linkdown)
#endif /* DHD_HANG_SEND_UP_TEST */
		{
			bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
			dhd_os_send_hang_message(bus->dhd);
		}
#endif /* OEM_ANDROID */
	}
	return rc;
}

static int dhdpcie_device_scan(struct device *dev, void *data)
{
	struct pci_dev *pcidev;
	int *cnt = data;

	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
	pcidev = container_of(dev, struct pci_dev, dev);
	GCC_DIAGNOSTIC_POP();

	if (pcidev->vendor != 0x14e4)
		return 0;

	DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
	*cnt += 1;
	if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
		DHD_ERROR(("Broadcom PCI device 0x%04x has been claimed by driver %s\n",
			pcidev->device, pcidev->driver->name));

	return 0;
}

int
dhdpcie_bus_register(void)
{
	int error = 0;

	if (!(error = pci_register_driver(&dhdpcie_driver))) {
		bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
		if (!error) {
			DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
		} else if (!dhdpcie_init_succeeded) {
			DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
		} else {
			return 0;
		}

		pci_unregister_driver(&dhdpcie_driver);
		error = BCME_ERROR;
	}

	return error;
}

void
dhdpcie_bus_unregister(void)
{
	pci_unregister_driver(&dhdpcie_driver);
}

int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	DHD_MUTEX_LOCK();

	if (dhdpcie_chipmatch(pdev->vendor, pdev->device)) {
		DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
		err = -ENODEV;
		goto exit;
	}

	printf("PCI_PROBE: bus %X, slot %X, vendor %X, device %X "
		"(good PCI location)\n", pdev->bus->number,
		PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);

	if (dhdpcie_init_succeeded == TRUE) {
		DHD_ERROR(("%s(): === Driver already attached to a BRCM device === \r\n",
			__FUNCTION__));
		err = -ENODEV;
		goto exit;
	}

	if (dhdpcie_init(pdev)) {
		DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
		err = -ENODEV;
		goto exit;
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	/*
	 * Since the MSM PCIe RC dev usage count is already incremented by +2 even
	 * before dhdpcie_pci_probe() is called, we inevitably have to call
	 * pm_runtime_put_noidle() two times to make the count start at zero.
	 */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
	/* disable async suspend */
	device_disable_async_suspend(&pdev->dev);
#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */

	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
exit:
	DHD_MUTEX_UNLOCK();
	return err;
}

int
dhdpcie_detach(dhdpcie_info_t *pch)
{
	if (pch) {
#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
		if (!dhd_download_fw_on_driverload) {
			pci_load_and_free_saved_state(pch->dev, &pch->default_state);
		}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
		MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
	}
	return 0;
}

void __devexit
dhdpcie_pci_remove(struct pci_dev *pdev)
{
	osl_t *osh = NULL;
	dhdpcie_info_t *pch = NULL;
	dhd_bus_t *bus = NULL;

	DHD_TRACE(("%s Enter\n", __FUNCTION__));

	DHD_MUTEX_LOCK();

	pch = pci_get_drvdata(pdev);
	bus = pch->bus;
	osh = pch->osh;

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	if (bus) {
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
		msm_pcie_deregister_event(&bus->pcie_event);
#endif /* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_EXYNOS
		exynos_pcie_deregister_event(&bus->pcie_event);
#endif /* CONFIG_ARCH_EXYNOS */
#endif /* SUPPORT_LINKDOWN_RECOVERY */

		bus->rc_dev = NULL;

		dhdpcie_bus_release(bus);
	}

	/*
	 * For a module-type driver, the configuration space needs to be backed
	 * up again before rmmod: the original backup won't be restored when
	 * state_saved = false, so back it up here so that state_saved = true.
	 */
	pci_save_state(pdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
#ifdef BCMPCIE_OOB_HOST_WAKE
	/* pcie os info detach */
	MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef USE_SMMU_ARCH_MSM
	/* smmu info detach */
	dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
	MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
#endif /* USE_SMMU_ARCH_MSM */
	/* pcie info detach */
	dhdpcie_detach(pch);
	/* osl detach */
	osl_detach(osh);

#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
	defined(CONFIG_ARCH_APQ8084)
	brcm_pcie_wake.wake_irq = NULL;
	brcm_pcie_wake.data = NULL;
#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */

	dhdpcie_init_succeeded = FALSE;

	DHD_MUTEX_UNLOCK();

	DHD_TRACE(("%s Exit\n", __FUNCTION__));

	return;
}

/* Enable Linux MSI */
int
dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
	return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
	return pci_enable_msi_range(pdev, min_vecs, max_vecs);
#else
	return pci_enable_msi_block(pdev, max_vecs);
#endif
}

/* Disable Linux MSI */
void
dhdpcie_disable_msi(struct pci_dev *pdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
	pci_free_irq_vectors(pdev);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
	pci_disable_msi(pdev);
#else
	pci_disable_msi(pdev);
#endif
	return;
}

/* Request Linux irq */
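/*
 * MSI is tried first when d2h_intr_method requests it; on failure the
 * driver falls back to legacy INTx before registering the shared ISR.
 */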
1716 int
dhdpcie_request_irq(dhdpcie_info_t * dhdpcie_info)1717 dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
1718 {
1719 	dhd_bus_t *bus = dhdpcie_info->bus;
1720 	struct pci_dev *pdev = dhdpcie_info->bus->dev;
1721 	int host_irq_disabled;
1722 
1723 	if (!bus->irq_registered) {
1724 		snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
1725 			"dhdpcie:%s", pci_name(pdev));
1726 
1727 		if (bus->d2h_intr_method == PCIE_MSI) {
1728 			if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
1729 				DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
1730 				dhdpcie_disable_msi(pdev);
1731 				bus->d2h_intr_method = PCIE_INTX;
1732 			}
1733 		}
1734 
1735 		if (bus->d2h_intr_method == PCIE_MSI)
1736 			printf("%s: MSI enabled\n", __FUNCTION__);
1737 		else
1738 			printf("%s: INTx enabled\n", __FUNCTION__);
1739 
1740 		if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
1741 			dhdpcie_info->pciname, bus) < 0) {
1742 			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
1743 			if (bus->d2h_intr_method == PCIE_MSI) {
1744 				dhdpcie_disable_msi(pdev);
1745 			}
1746 			return -1;
1747 		}
1748 		else {
1749 			bus->irq_registered = TRUE;
1750 		}
1751 	} else {
1752 		DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
1753 	}
1754 
1755 	host_irq_disabled = dhdpcie_irq_disabled(bus);
1756 	if (host_irq_disabled) {
1757 		DHD_ERROR(("%s: PCIe IRQ was disabled (%d), so enable it again\n",
1758 			__FUNCTION__, host_irq_disabled));
1759 		dhdpcie_enable_irq(bus);
1760 	}
1761 
1762 	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
1763 
1764 	return 0; /* SUCCESS */
1765 }
1766 
1767 /**
1768  *	dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
1769  */
1770 int
1771 dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
1772 {
1773 	struct pci_dev *pdev = bus->dev;
1774 
1775 	if (!pdev) {
1776 		DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
1777 		return -ENODEV;
1778 	}
1779 
1780 	*irq  = pdev->irq;
1781 
1782 	return 0; /* SUCCESS */
1783 }
1784 
1785 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1786 #define PRINTF_RESOURCE	"0x%016llx"
1787 #else
1788 #define PRINTF_RESOURCE	"0x%08x"
1789 #endif
1790 
1791 #ifdef EXYNOS_PCIE_MODULE_PATCH
1792 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1793 extern struct pci_saved_state *bcm_pcie_default_state;
1794 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1795 #endif /* EXYNOS_PCIE_MODULE_PATCH */
1796 
1797 /*
1798 
1799 Name:  dhdpcie_get_resource
1800 
1801 Parameters:
1802 
1803 1: dhdpcie_info_t *dhdpcie_info -- Linux PCIe info structure; holds the
1804    pci_dev handle and receives the mapped BAR0/BAR1 resources
1805 
1806 Return value:
1807 
1808 int   - 0 on success, -1 on failure
1809 
1810 Description:
1811 Accesses PCI configuration space, retrieves the PCI-allocated resources,
1812 and updates them in the resource structure.
1813  */
1814 int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
1815 {
1816 	phys_addr_t  bar0_addr, bar1_addr;
1817 	ulong bar1_size;
1818 	struct pci_dev *pdev = NULL;
1819 	pdev = dhdpcie_info->dev;
1820 #ifdef EXYNOS_PCIE_MODULE_PATCH
1821 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1822 	if (bcm_pcie_default_state) {
1823 		pci_load_saved_state(pdev, bcm_pcie_default_state);
1824 		pci_restore_state(pdev);
1825 	}
1826 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1827 #endif /* EXYNOS_PCIE_MODULE_PATCH */
1828 
1829 	/*
1830 	 * For a built-in driver,
1831 	 * the configuration backup cannot be restored at first load time
1832 	 * because state_saved == false at that point.
1833 	 * For a module-type driver, the BAR0/BAR1 addresses cannot be remapped
1834 	 * at second load without restoring the configuration backup, which
1835 	 * remains in the pci_dev because DHD did not remove it from the bus.
1836 	 * pci_restore_state() restores the proper BAR0/BAR1 addresses.
1837 	 */
1838 	pci_restore_state(pdev);
1839 
1840 	do {
1841 		if (pci_enable_device(pdev)) {
1842 			printf("%s: Cannot enable PCI device\n", __FUNCTION__);
1843 			break;
1844 		}
1845 		pci_set_master(pdev);
1846 		bar0_addr = pci_resource_start(pdev, 0);	/* Bar-0 mapped address */
1847 		bar1_addr = pci_resource_start(pdev, 2);	/* Bar-1 mapped address */
1848 
1849 		/* read Bar-1 mapped memory range */
1850 		bar1_size = pci_resource_len(pdev, 2);
1851 
1852 		if ((bar1_size == 0) || (bar1_addr == 0)) {
1853 			printf("%s: BAR1 not enabled for this device, size(%ld),"
1854 				" addr(0x"PRINTF_RESOURCE")\n",
1855 				__FUNCTION__, bar1_size, bar1_addr);
1856 			goto err;
1857 		}
1858 
1859 		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
1860 		dhdpcie_info->bar1_size =
1861 			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
1862 		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
1863 
1864 		if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
1865 			DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
1866 			break;
1867 		}
1868 #ifdef EXYNOS_PCIE_MODULE_PATCH
1869 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1870 		if (bcm_pcie_default_state == NULL) {
1871 			pci_save_state(pdev);
1872 			bcm_pcie_default_state = pci_store_saved_state(pdev);
1873 		}
1874 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1875 #endif /* EXYNOS_PCIE_MODULE_PATCH */
1876 
1877 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1878 		/* Back up the PCIe configuration so the Wi-Fi on/off process can
1879 		 * use it in the case of a built-in driver.
1880 		 */
1881 		pci_save_state(pdev);
1882 		dhdpcie_info->default_state = pci_store_saved_state(pdev);
1883 
1884 		if (dhdpcie_info->default_state == NULL) {
1885 			DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
1886 				__FUNCTION__));
1887 			REG_UNMAP(dhdpcie_info->regs);
1888 			REG_UNMAP(dhdpcie_info->tcm);
1889 			pci_disable_device(pdev);
1890 			break;
1891 		}
1892 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1893 
1894 		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
1895 			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
1896 		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
1897 			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
1898 
1899 		return 0; /* SUCCESS  */
1900 	} while (0);
1901 err:
1902 	return -1;  /* FAILURE */
1903 }
1904 
1905 int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
1906 {
1907 
1908 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1909 
1910 	do {
1911 		/* define it here only!! */
1912 		if (dhdpcie_get_resource (dhdpcie_info)) {
1913 			DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
1914 			break;
1915 		}
1916 		DHD_TRACE(("%s:Exit - SUCCESS \n",
1917 			__FUNCTION__));
1918 
1919 		return 0; /* SUCCESS */
1920 
1921 	} while (0);
1922 
1923 	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1924 
1925 	return -1; /* FAILURE */
1926 
1927 }
1928 
1929 void dhdpcie_dump_resource(dhd_bus_t *bus)
1930 {
1931 	dhdpcie_info_t *pch;
1932 
1933 	if (bus == NULL) {
1934 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
1935 		return;
1936 	}
1937 
1938 	if (bus->dev == NULL) {
1939 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
1940 		return;
1941 	}
1942 
1943 	pch = pci_get_drvdata(bus->dev);
1944 	if (pch == NULL) {
1945 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
1946 		return;
1947 	}
1948 
1949 	/* BAR0 */
1950 	DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
1951 		__FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
1952 		DONGLE_REG_MAP_SIZE));
1953 
1954 	/* BAR1 */
1955 	DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
1956 		__FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
1957 		pch->bar1_size));
1958 }
1959 
1960 #ifdef SUPPORT_LINKDOWN_RECOVERY
1961 #if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
1962 void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
1963 {
1964 	struct pci_dev *pdev = (struct pci_dev *)noti->user;
1965 	dhdpcie_info_t *pch = NULL;
1966 
1967 	if (pdev) {
1968 		pch = pci_get_drvdata(pdev);
1969 		if (pch) {
1970 			dhd_bus_t *bus = pch->bus;
1971 			if (bus) {
1972 				dhd_pub_t *dhd = bus->dhd;
1973 				if (dhd) {
1974 #ifdef CONFIG_ARCH_MSM
1975 					DHD_ERROR(("%s: Set no_cfg_restore flag\n",
1976 						__FUNCTION__));
1977 					bus->no_cfg_restore = 1;
1978 #endif /* CONFIG_ARCH_MSM */
1979 #ifdef DHD_SSSR_DUMP
1980 					if (dhd->fis_triggered) {
1981 						DHD_ERROR(("%s: PCIe linkdown due to FIS, Ignore\n",
1982 							__FUNCTION__));
1983 					} else
1984 #endif /* DHD_SSSR_DUMP */
1985 					{
1986 						DHD_ERROR(("%s: Event HANG send up "
1987 							"due to PCIe linkdown\n",
1988 							__FUNCTION__));
1989 						bus->is_linkdown = 1;
1990 						dhd->hang_reason =
1991 							HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
1992 						dhd_os_send_hang_message(dhd);
1993 					}
1994 				}
1995 			}
1996 		}
1997 	}
1998 
1999 }
2000 #endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
2001 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2002 
2003 int dhdpcie_init(struct pci_dev *pdev)
2004 {
2005 
2006 	osl_t 				*osh = NULL;
2007 	dhd_bus_t 			*bus = NULL;
2008 	dhdpcie_info_t		*dhdpcie_info =  NULL;
2009 	wifi_adapter_info_t	*adapter = NULL;
2010 #ifdef BCMPCIE_OOB_HOST_WAKE
2011 	dhdpcie_os_info_t	*dhdpcie_osinfo = NULL;
2012 #endif /* BCMPCIE_OOB_HOST_WAKE */
2013 #ifdef USE_SMMU_ARCH_MSM
2014 	dhdpcie_smmu_info_t	*dhdpcie_smmu_info = NULL;
2015 #endif /* USE_SMMU_ARCH_MSM */
2016 	int ret = 0;
2017 
2018 	do {
2019 		/* osl attach */
2020 		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
2021 			DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
2022 			break;
2023 		}
2024 
2025 		/* initialize static buffer */
2026 		adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
2027 			PCI_SLOT(pdev->devfn));
2028 		if (adapter != NULL) {
2029 			DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
2030 			adapter->bus_type = PCI_BUS;
2031 			adapter->bus_num = pdev->bus->number;
2032 			adapter->slot_num = PCI_SLOT(pdev->devfn);
2033 			adapter->pci_dev = pdev;
2034 		} else
2035 			DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
2036 		osl_static_mem_init(osh, adapter);
2037 
2038 		/* allocate the Linux-specific PCIe structure here */
2039 		if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
2040 			DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
2041 			break;
2042 		}
2043 		bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
2044 		dhdpcie_info->osh = osh;
2045 		dhdpcie_info->dev = pdev;
2046 
2047 #ifdef BCMPCIE_OOB_HOST_WAKE
2048 		/* allocate the OS-specific structure */
2049 		dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
2050 		if (dhdpcie_osinfo == NULL) {
2051 			DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
2052 				__FUNCTION__));
2053 			break;
2054 		}
2055 		bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
2056 		dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
2057 
2058 		/* Initialize host wake IRQ */
2059 		spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
2060 		/* Get customer-specific host wake IRQ parameters: IRQ number and IRQ flags */
2061 		dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
2062 			&dhdpcie_osinfo->oob_irq_flags);
2063 		if (dhdpcie_osinfo->oob_irq_num < 0) {
2064 			DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
2065 		}
2066 #endif /* BCMPCIE_OOB_HOST_WAKE */
2067 
2068 #ifdef USE_SMMU_ARCH_MSM
2069 		/* allocate private structure for using SMMU */
2070 		dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
2071 		if (dhdpcie_smmu_info == NULL) {
2072 			DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
2073 				__FUNCTION__));
2074 			break;
2075 		}
2076 		bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
2077 		dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
2078 
2079 		/* Initialize smmu structure */
2080 		if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
2081 			DHD_ERROR(("%s: Failed to initialize SMMU\n",
2082 				__FUNCTION__));
2083 			break;
2084 		}
2085 #endif /* USE_SMMU_ARCH_MSM */
2086 
2087 #ifdef DHD_WAKE_STATUS
2088 		/* Initialize pkt_wake_lock */
2089 		spin_lock_init(&dhdpcie_info->pkt_wake_lock);
2090 #endif /* DHD_WAKE_STATUS */
2091 
2092 		/* Find the PCI resources, verify the vendor and device ID, */
2093 		/* map the BAR regions and irq, and update the structures */
2094 		if (dhdpcie_scan_resource(dhdpcie_info)) {
2095 			DHD_ERROR(("%s: dhdpcie_scan_resource failed\n", __FUNCTION__));
2096 
2097 			break;
2098 		}
2099 
2100 		/* Bus initialization */
2101 		ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev, adapter);
2102 		if (ret != BCME_OK) {
2103 			DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
2104 			break;
2105 		}
2106 
2107 		dhdpcie_info->bus = bus;
2108 		bus->bar1_size = dhdpcie_info->bar1_size;
2109 		bus->is_linkdown = 0;
2110 		bus->no_bus_init = FALSE;
2111 		bus->cto_triggered = 0;
2112 
2113 		bus->rc_dev = NULL;
2114 
2115 		/* Get RC Device Handle */
2116 		if (bus->dev->bus) {
2117 			/* self member of structure pci_bus is bridge device as seen by parent */
2118 			bus->rc_dev = bus->dev->bus->self;
2119 			if (bus->rc_dev)
2120 				DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__,
2121 					bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev));
2122 			else
2123 				DHD_ERROR(("%s: bus->dev->bus->self is NULL\n", __FUNCTION__));
2124 		} else {
2125 			DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__));
2126 		}
2127 
2128 		/* if rc_dev is still NULL, try to get from vendor/device IDs */
2129 		if (bus->rc_dev == NULL) {
2130 			bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
2131 			DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__,
2132 				PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev));
2133 		}
2134 
2135 		bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus);
2136 		bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus);
2137 		DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
2138 			__FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
2139 
2140 #ifdef FORCE_TPOWERON
2141 		if (dhdpcie_chip_req_forced_tpoweron(bus)) {
2142 			dhd_bus_set_tpoweron(bus, tpoweron_scale);
2143 		}
2144 #endif /* FORCE_TPOWERON */
2145 
2146 #if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
2147 		defined(CONFIG_ARCH_APQ8084)
2148 		brcm_pcie_wake.wake_irq = wlan_oob_irq;
2149 		brcm_pcie_wake.data = bus;
2150 #endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
2151 
2152 #ifdef DONGLE_ENABLE_ISOLATION
2153 		bus->dhd->dongle_isolation = TRUE;
2154 #endif /* DONGLE_ENABLE_ISOLATION */
2155 #ifdef SUPPORT_LINKDOWN_RECOVERY
2156 #ifdef CONFIG_ARCH_MSM
2157 		bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
2158 		bus->pcie_event.user = pdev;
2159 		bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
2160 		bus->pcie_event.callback = dhdpcie_linkdown_cb;
2161 		bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
2162 		msm_pcie_register_event(&bus->pcie_event);
2163 		bus->no_cfg_restore = FALSE;
2164 #endif /* CONFIG_ARCH_MSM */
2165 #ifdef CONFIG_ARCH_EXYNOS
2166 		bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
2167 		bus->pcie_event.user = pdev;
2168 		bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
2169 		bus->pcie_event.callback = dhdpcie_linkdown_cb;
2170 		exynos_pcie_register_event(&bus->pcie_event);
2171 #endif /* CONFIG_ARCH_EXYNOS */
2172 		bus->read_shm_fail = FALSE;
2173 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2174 
2175 		if (bus->intr) {
2176 			/* Register interrupt callback, but mask it (not operational yet). */
2177 			DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
2178 			bus->intr_enabled = FALSE;
2179 			dhdpcie_bus_intr_disable(bus);
2180 
2181 			if (dhdpcie_request_irq(dhdpcie_info)) {
2182 				DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
2183 				break;
2184 			}
2185 		} else {
2186 			bus->pollrate = 1;
2187 			DHD_INFO(("%s: PCIe interrupt function is NOT registered "
2188 				"due to polling mode\n", __FUNCTION__));
2189 		}
2190 
2191 #if defined(BCM_REQUEST_FW)
2192 		if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
2193 			DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
2194 		}
2195 		bus->nv_path = NULL;
2196 		bus->fw_path = NULL;
2197 #endif /* BCM_REQUEST_FW */
2198 
2199 		/* set private data for pci_dev */
2200 		pci_set_drvdata(pdev, dhdpcie_info);
2201 
2202 		/* Ensure BAR1 switch feature enable if needed before FW download */
2203 		dhdpcie_bar1_window_switch_enab(bus);
2204 
2205 #if defined(BCMDHD_MODULAR) && defined(INSMOD_FW_LOAD)
2206 		if (1)
2207 #else
2208 		if (dhd_download_fw_on_driverload)
2209 #endif /* BCMDHD_MODULAR && INSMOD_FW_LOAD */
2210 		{
2211 			if (dhd_bus_start(bus->dhd)) {
2212 				DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
2213 				if (!allow_delay_fwdl)
2214 					break;
2215 			}
2216 		} else {
2217 			/* Set a random MAC address during boot time */
2218 			get_random_bytes(&bus->dhd->mac.octet[3], 3);
2219 			/* Adding BRCM OUI */
2220 			bus->dhd->mac.octet[0] = 0;
2221 			bus->dhd->mac.octet[1] = 0x90;
2222 			bus->dhd->mac.octet[2] = 0x4C;
2223 		}
2224 
2225 		/* Attach to the OS network interface */
2226 		DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
2227 		if (dhd_attach_net(bus->dhd, TRUE)) {
2228 			DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
2229 			break;
2230 		}
2231 
2232 		dhdpcie_init_succeeded = TRUE;
2233 #ifdef CONFIG_ARCH_MSM
2234 		sec_pcie_set_use_ep_loaded(bus->rc_dev);
2235 #endif /* CONFIG_ARCH_MSM */
2236 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2237 		pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
2238 		pm_runtime_use_autosuspend(&pdev->dev);
2239 		atomic_set(&bus->dhd->block_bus, FALSE);
2240 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2241 
2242 #if defined(MULTIPLE_SUPPLICANT)
2243 		wl_android_post_init(); /* terence 20120530: fix critical section in dhd_open and dhdsdio_probe */
2244 #endif /* MULTIPLE_SUPPLICANT */
2245 
2246 		DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
2247 		return 0;  /* return  SUCCESS  */
2248 
2249 	} while (0);
2250 	/* reverse the initialization in order in case of error */
2251 
2252 	if (bus)
2253 		dhdpcie_bus_release(bus);
2254 
2255 #ifdef BCMPCIE_OOB_HOST_WAKE
2256 	if (dhdpcie_osinfo) {
2257 		MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
2258 	}
2259 #endif /* BCMPCIE_OOB_HOST_WAKE */
2260 
2261 #ifdef USE_SMMU_ARCH_MSM
2262 	if (dhdpcie_smmu_info) {
2263 		MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
2264 		dhdpcie_info->smmu_cxt = NULL;
2265 	}
2266 #endif /* USE_SMMU_ARCH_MSM */
2267 
2268 	if (dhdpcie_info)
2269 		dhdpcie_detach(dhdpcie_info);
2270 	pci_disable_device(pdev);
2271 	if (osh)
2272 		osl_detach(osh);
2273 	if (adapter != NULL) {
2274 		adapter->bus_type = -1;
2275 		adapter->bus_num = -1;
2276 		adapter->slot_num = -1;
2277 	}
2278 
2279 	dhdpcie_init_succeeded = FALSE;
2280 
2281 	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
2282 
2283 	return -1; /* return FAILURE  */
2284 }
2285 
2286 /* Free Linux irq */
2287 void
2288 dhdpcie_free_irq(dhd_bus_t *bus)
2289 {
2290 	struct pci_dev *pdev = NULL;
2291 
2292 	DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
2293 	if (bus) {
2294 		pdev = bus->dev;
2295 		if (bus->irq_registered) {
2296 #if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
2297 			/* clean up the affinity_hint before
2298 			 * the unregistration of PCIe irq
2299 			 */
2300 			(void)irq_set_affinity_hint(pdev->irq, NULL);
2301 #endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
2302 			free_irq(pdev->irq, bus);
2303 			bus->irq_registered = FALSE;
2304 			if (bus->d2h_intr_method == PCIE_MSI) {
2305 				dhdpcie_disable_msi(pdev);
2306 			}
2307 		} else {
2308 			DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
2309 		}
2310 	}
2311 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2312 	return;
2313 }
2314 
2315 /*
2316 
2317 Name:  dhdpcie_isr
2318 
2319 Parameters:
2320 
2321 1: IN int irq   -- interrupt vector
2322 2: IN void *arg      -- handle to private data structure
2323 
2324 Return value:
2325 
2326 irqreturn_t   - IRQ_HANDLED
2327 
2328 Description:
2329 The interrupt service routine checks the status register, disables the
2330 interrupt, and queues the DPC if mailbox interrupts are raised.
2331 */
2332 
2333 irqreturn_t
2334 dhdpcie_isr(int irq, void *arg)
2335 {
2336 	dhd_bus_t *bus = (dhd_bus_t*)arg;
2337 	bus->isr_entry_time = OSL_LOCALTIME_NS();
2338 	if (!dhdpcie_bus_isr(bus)) {
2339 		DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
2340 	}
2341 	bus->isr_exit_time = OSL_LOCALTIME_NS();
2342 	return IRQ_HANDLED;
2343 }
2344 
2345 int
2346 dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
2347 {
2348 	struct pci_dev *dev;
2349 	if ((bus == NULL) || (bus->dev == NULL)) {
2350 		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2351 		return BCME_ERROR;
2352 	}
2353 
2354 	dev = bus->dev;
2355 	disable_irq_nosync(dev->irq);
2356 	return BCME_OK;
2357 }
2358 
2359 int
2360 dhdpcie_disable_irq(dhd_bus_t *bus)
2361 {
2362 	struct pci_dev *dev;
2363 	if ((bus == NULL) || (bus->dev == NULL)) {
2364 		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2365 		return BCME_ERROR;
2366 	}
2367 
2368 	dev = bus->dev;
2369 	disable_irq(dev->irq);
2370 	return BCME_OK;
2371 }
2372 
2373 int
2374 dhdpcie_enable_irq(dhd_bus_t *bus)
2375 {
2376 	struct pci_dev *dev;
2377 	if ((bus == NULL) || (bus->dev == NULL)) {
2378 		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2379 		return BCME_ERROR;
2380 	}
2381 
2382 	dev = bus->dev;
2383 	enable_irq(dev->irq);
2384 	return BCME_OK;
2385 }
2386 
2387 int
2388 dhdpcie_irq_disabled(dhd_bus_t *bus)
2389 {
2390 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
2391 	struct irq_desc *desc = irq_to_desc(bus->dev->irq);
2392 	/* depth will be zero, if enabled */
2393 	return desc->depth;
2394 #else
2395 	/* return ERROR by default as there is no support for lower versions */
2396 	return BCME_ERROR;
2397 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
2398 }
2399 
2400 #if defined(CONFIG_ARCH_EXYNOS)
2401 int pcie_ch_num = EXYNOS_PCIE_CH_NUM;
2402 #endif /* CONFIG_ARCH_EXYNOS */
2403 
2404 int
2405 dhdpcie_start_host_dev(dhd_bus_t *bus)
2406 {
2407 	int ret = 0;
2408 #ifdef CONFIG_ARCH_MSM
2409 #ifdef SUPPORT_LINKDOWN_RECOVERY
2410 	int options = 0;
2411 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2412 #endif /* CONFIG_ARCH_MSM */
2413 	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2414 
2415 	if (bus == NULL) {
2416 		return BCME_ERROR;
2417 	}
2418 
2419 	if (bus->dev == NULL) {
2420 		return BCME_ERROR;
2421 	}
2422 
2423 #ifdef CONFIG_ARCH_EXYNOS
2424 	exynos_pcie_pm_resume(pcie_ch_num);
2425 #endif /* CONFIG_ARCH_EXYNOS */
2426 #ifdef CONFIG_ARCH_MSM
2427 #ifdef SUPPORT_LINKDOWN_RECOVERY
2428 	if (bus->no_cfg_restore) {
2429 		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
2430 	}
2431 	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
2432 		bus->dev, NULL, options);
2433 	if (bus->no_cfg_restore && !ret) {
2434 		msm_pcie_recover_config(bus->dev);
2435 		bus->no_cfg_restore = 0;
2436 	}
2437 #else
2438 	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
2439 		bus->dev, NULL, 0);
2440 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2441 #endif /* CONFIG_ARCH_MSM */
2442 #ifdef CONFIG_ARCH_TEGRA
2443 	ret = tegra_pcie_pm_resume();
2444 #endif /* CONFIG_ARCH_TEGRA */
2445 
2446 	if (ret) {
2447 		DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
2448 		goto done;
2449 	}
2450 
2451 done:
2452 	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
2453 	return ret;
2454 }
2455 
2456 int
2457 dhdpcie_stop_host_dev(dhd_bus_t *bus)
2458 {
2459 	int ret = 0;
2460 #ifdef CONFIG_ARCH_MSM
2461 #ifdef SUPPORT_LINKDOWN_RECOVERY
2462 	int options = 0;
2463 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2464 #endif /* CONFIG_ARCH_MSM */
2465 
2466 	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2467 
2468 	if (bus == NULL) {
2469 		return BCME_ERROR;
2470 	}
2471 
2472 	if (bus->dev == NULL) {
2473 		return BCME_ERROR;
2474 	}
2475 
2476 #ifdef CONFIG_ARCH_EXYNOS
2477 	exynos_pcie_pm_suspend(pcie_ch_num);
2478 #endif /* CONFIG_ARCH_EXYNOS */
2479 #ifdef CONFIG_ARCH_MSM
2480 #ifdef SUPPORT_LINKDOWN_RECOVERY
2481 	if (bus->no_cfg_restore) {
2482 		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
2483 	}
2484 
2485 	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
2486 		bus->dev, NULL, options);
2487 #else
2488 	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
2489 		bus->dev, NULL, 0);
2490 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2491 #endif /* CONFIG_ARCH_MSM */
2492 #ifdef CONFIG_ARCH_TEGRA
2493 	ret = tegra_pcie_pm_suspend();
2494 #endif /* CONFIG_ARCH_TEGRA */
2495 	if (ret) {
2496 		DHD_ERROR(("%s: Failed to stop PCIe link\n", __FUNCTION__));
2497 		goto done;
2498 	}
2499 done:
2500 	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
2501 	return ret;
2502 }
2503 
2504 int
2505 dhdpcie_disable_device(dhd_bus_t *bus)
2506 {
2507 	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2508 
2509 	if (bus == NULL) {
2510 		return BCME_ERROR;
2511 	}
2512 
2513 	if (bus->dev == NULL) {
2514 		return BCME_ERROR;
2515 	}
2516 
2517 	if (pci_is_enabled(bus->dev))
2518 		pci_disable_device(bus->dev);
2519 
2520 	return 0;
2521 }
2522 
2523 int
2524 dhdpcie_enable_device(dhd_bus_t *bus)
2525 {
2526 	int ret = BCME_ERROR;
2527 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2528 	dhdpcie_info_t *pch;
2529 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2530 
2531 	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2532 
2533 	if (bus == NULL) {
2534 		return BCME_ERROR;
2535 	}
2536 
2537 	if (bus->dev == NULL) {
2538 		return BCME_ERROR;
2539 	}
2540 
2541 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2542 	pch = pci_get_drvdata(bus->dev);
2543 	if (pch == NULL) {
2544 		return BCME_ERROR;
2545 	}
2546 
2547 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && \
2548 	(LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
2549 	/* Updated with pci_load_and_free_saved_state to compatible
2550 	 * with Kernel version 3.14.0 to 3.18.41.
2551 	 */
2552 	pci_load_and_free_saved_state(bus->dev, &pch->default_state);
2553 	pch->default_state = pci_store_saved_state(bus->dev);
2554 #else
2555 	pci_load_saved_state(bus->dev, pch->default_state);
2556 #endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */
2557 
2558 	/* Check that the saved vendor/device ID dword is still valid */
2559 	if (bus->dev->state_saved) {
2560 		uint32 vid, saved_vid;
2561 		pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid);
2562 		saved_vid = bus->dev->saved_config_space[PCI_CFG_VID];
2563 		if (vid != saved_vid) {
2564 			DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
2565 				"Skip the bus init\n", __FUNCTION__, vid, saved_vid));
2566 			bus->no_bus_init = TRUE;
2567 			/* Check if the PCIe link is down */
2568 			if (vid == (uint32)-1) {
2569 				bus->is_linkdown = 1;
2570 #ifdef SUPPORT_LINKDOWN_RECOVERY
2571 #ifdef CONFIG_ARCH_MSM
2572 				bus->no_cfg_restore = TRUE;
2573 #endif /* CONFIG_ARCH_MSM */
2574 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2575 			}
2576 			return BCME_ERROR;
2577 		}
2578 	}
2579 
2580 	pci_restore_state(bus->dev);
2581 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
2582 
2583 	ret = pci_enable_device(bus->dev);
2584 	if (ret) {
2585 		pci_disable_device(bus->dev);
2586 	} else {
2587 		pci_set_master(bus->dev);
2588 	}
2589 
2590 	return ret;
2591 }
2592 
2593 int
2594 dhdpcie_alloc_resource(dhd_bus_t *bus)
2595 {
2596 	dhdpcie_info_t *dhdpcie_info;
2597 	phys_addr_t bar0_addr, bar1_addr;
2598 	ulong bar1_size;
2599 
2600 	do {
2601 		if (bus == NULL) {
2602 			DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2603 			break;
2604 		}
2605 
2606 		if (bus->dev == NULL) {
2607 			DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2608 			break;
2609 		}
2610 
2611 		dhdpcie_info = pci_get_drvdata(bus->dev);
2612 		if (dhdpcie_info == NULL) {
2613 			DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2614 			break;
2615 		}
2616 
2617 		bar0_addr = pci_resource_start(bus->dev, 0);	/* Bar-0 mapped address */
2618 		bar1_addr = pci_resource_start(bus->dev, 2);	/* Bar-1 mapped address */
2619 
2620 		/* read Bar-1 mapped memory range */
2621 		bar1_size = pci_resource_len(bus->dev, 2);
2622 
2623 		if ((bar1_size == 0) || (bar1_addr == 0)) {
2624 			printf("%s: BAR1 not enabled for this device, size(%ld),"
2625 				" addr(0x"PRINTF_RESOURCE")\n",
2626 				__FUNCTION__, bar1_size, bar1_addr);
2627 			break;
2628 		}
2629 
2630 		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
2631 		if (!dhdpcie_info->regs) {
2632 			DHD_ERROR(("%s: ioremap() for regs failed\n", __FUNCTION__));
2633 			break;
2634 		}
2635 
2636 		bus->regs = dhdpcie_info->regs;
2637 		dhdpcie_info->bar1_size =
2638 			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
2639 		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
2640 		if (!dhdpcie_info->tcm) {
2641 			DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
2642 			REG_UNMAP(dhdpcie_info->regs);
2643 			bus->regs = NULL;
2644 			break;
2645 		}
2646 
2647 		bus->tcm = dhdpcie_info->tcm;
2648 		bus->bar1_size = dhdpcie_info->bar1_size;
2649 
2650 		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
2651 			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
2652 		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
2653 			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
2654 
2655 		return 0;
2656 	} while (0);
2657 
2658 	return BCME_ERROR;
2659 }
2660 
2661 void
2662 dhdpcie_free_resource(dhd_bus_t *bus)
2663 {
2664 	dhdpcie_info_t *dhdpcie_info;
2665 
2666 	if (bus == NULL) {
2667 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2668 		return;
2669 	}
2670 
2671 	if (bus->dev == NULL) {
2672 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2673 		return;
2674 	}
2675 
2676 	dhdpcie_info = pci_get_drvdata(bus->dev);
2677 	if (dhdpcie_info == NULL) {
2678 		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2679 		return;
2680 	}
2681 
2682 	if (bus->regs) {
2683 		REG_UNMAP(dhdpcie_info->regs);
2684 		bus->regs = NULL;
2685 	}
2686 
2687 	if (bus->tcm) {
2688 		REG_UNMAP(dhdpcie_info->tcm);
2689 		bus->tcm = NULL;
2690 	}
2691 }
2692 
2693 int
2694 dhdpcie_bus_request_irq(struct dhd_bus *bus)
2695 {
2696 	dhdpcie_info_t *dhdpcie_info;
2697 	int ret = 0;
2698 
2699 	if (bus == NULL) {
2700 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2701 		return BCME_ERROR;
2702 	}
2703 
2704 	if (bus->dev == NULL) {
2705 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2706 		return BCME_ERROR;
2707 	}
2708 
2709 	dhdpcie_info = pci_get_drvdata(bus->dev);
2710 	if (dhdpcie_info == NULL) {
2711 		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2712 		return BCME_ERROR;
2713 	}
2714 
2715 	if (bus->intr) {
2716 		/* Register interrupt callback, but mask it (not operational yet). */
2717 		DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
2718 		bus->intr_enabled = FALSE;
2719 		dhdpcie_bus_intr_disable(bus);
2720 		ret = dhdpcie_request_irq(dhdpcie_info);
2721 		if (ret) {
2722 			DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
2723 				__FUNCTION__, ret));
2724 			return ret;
2725 		}
2726 	}
2727 
2728 	return ret;
2729 }
2730 
2731 #ifdef BCMPCIE_OOB_HOST_WAKE
2732 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2733 extern int dhd_get_wlan_oob_gpio(void);
2734 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2735 
2736 int dhdpcie_get_oob_irq_level(void)
2737 {
2738 	int gpio_level;
2739 
2740 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2741 	gpio_level = dhd_get_wlan_oob_gpio();
2742 #else
2743 	gpio_level = BCME_UNSUPPORTED;
2744 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2745 	return gpio_level;
2746 }
2747 
2748 int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
2749 {
2750 	dhdpcie_info_t *pch;
2751 	dhdpcie_os_info_t *dhdpcie_osinfo;
2752 
2753 	if (bus == NULL) {
2754 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2755 		return 0;
2756 	}
2757 
2758 	if (bus->dev == NULL) {
2759 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2760 		return 0;
2761 	}
2762 
2763 	pch = pci_get_drvdata(bus->dev);
2764 	if (pch == NULL) {
2765 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2766 		return 0;
2767 	}
2768 
2769 	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2770 
2771 	return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
2772 }
2773 
2774 int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
2775 {
2776 	dhdpcie_info_t *pch;
2777 	dhdpcie_os_info_t *dhdpcie_osinfo;
2778 
2779 	if (bus == NULL) {
2780 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2781 		return 0;
2782 	}
2783 
2784 	if (bus->dev == NULL) {
2785 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2786 		return 0;
2787 	}
2788 
2789 	pch = pci_get_drvdata(bus->dev);
2790 	if (pch == NULL) {
2791 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2792 		return 0;
2793 	}
2794 
2795 	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2796 
2797 	return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0;
2798 }
2799 
2800 void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
2801 {
2802 	unsigned long flags;
2803 	dhdpcie_info_t *pch;
2804 	dhdpcie_os_info_t *dhdpcie_osinfo;
2805 
2806 	if (bus == NULL) {
2807 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2808 		return;
2809 	}
2810 
2811 	if (bus->dev == NULL) {
2812 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2813 		return;
2814 	}
2815 
2816 	pch = pci_get_drvdata(bus->dev);
2817 	if (pch == NULL) {
2818 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2819 		return;
2820 	}
2821 
2822 	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2823 	DHD_OOB_IRQ_LOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags);
2824 	if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
2825 		(dhdpcie_osinfo->oob_irq_num > 0)) {
2826 		if (enable) {
2827 			enable_irq(dhdpcie_osinfo->oob_irq_num);
2828 			bus->oob_intr_enable_count++;
2829 			bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
2830 		} else {
2831 			disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
2832 			bus->oob_intr_disable_count++;
2833 			bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
2834 		}
2835 		dhdpcie_osinfo->oob_irq_enabled = enable;
2836 	}
2837 	DHD_OOB_IRQ_UNLOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags);
2838 }
2839 
2840 #if defined(DHD_USE_SPIN_LOCK_BH) && !defined(DHD_USE_PCIE_OOB_THREADED_IRQ)
2841 #error "Cannot enable DHD_USE_SPIN_LOCK_BH without enabling DHD_USE_PCIE_OOB_THREADED_IRQ"
2842 #endif /* DHD_USE_SPIN_LOCK_BH && !DHD_USE_PCIE_OOB_THREADED_IRQ */
2843 
2844 #ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
2845 static irqreturn_t wlan_oob_irq_isr(int irq, void *data)
2846 {
2847 	dhd_bus_t *bus = (dhd_bus_t *)data;
2848 	DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__));
2849 	bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS();
2850 	return IRQ_WAKE_THREAD;
2851 }
2852 #endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
2853 
2854 static irqreturn_t wlan_oob_irq(int irq, void *data)
2855 {
2856 	dhd_bus_t *bus;
2857 	bus = (dhd_bus_t *)data;
2858 	dhdpcie_oob_intr_set(bus, FALSE);
2859 #ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
2860 	DHD_TRACE(("%s: IRQ Thread\n", __FUNCTION__));
2861 	bus->last_oob_irq_thr_time = OSL_LOCALTIME_NS();
2862 #else
2863 	DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__));
2864 	bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS();
2865 #endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
2866 
2867 	if (bus->dhd->up == 0) {
2868 		DHD_ERROR(("%s: ########### IRQ received while dhd->up is 0 ############\n",
2869 			__FUNCTION__));
2870 	}
2871 
2872 	bus->oob_intr_count++;
2873 #ifdef DHD_WAKE_STATUS
2874 #ifdef DHD_PCIE_RUNTIMEPM
2875 	/* This condition avoids counting wake-ups caused by runtime PM */
2876 	if (bus->chk_pm)
2877 #endif /* DHD_PCIE_RUNTIMEPM */
2878 	{
2879 		bcmpcie_set_get_wake(bus, 1);
2880 	}
2881 #endif /* DHD_WAKE_STATUS */
2882 #ifdef DHD_PCIE_RUNTIMEPM
2883 	dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
2884 #endif /* DHD_PCIE_RUNTIMEPM */
2885 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2886 	dhd_bus_wakeup_work(bus->dhd);
2887 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2888 	/* Hold wakelock if bus_low_power_state is
2889 	 * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED
2890 	 */
2891 	if (bus->dhd->up && DHD_CHK_BUS_IN_LPS(bus)) {
2892 		DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
2893 	}
2894 	return IRQ_HANDLED;
2895 }
2896 
2897 int dhdpcie_oob_intr_register(dhd_bus_t *bus)
2898 {
2899 	int err = 0;
2900 	dhdpcie_info_t *pch;
2901 	dhdpcie_os_info_t *dhdpcie_osinfo;
2902 
2903 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2904 	if (bus == NULL) {
2905 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2906 		return -EINVAL;
2907 	}
2908 
2909 	if (bus->dev == NULL) {
2910 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2911 		return -EINVAL;
2912 	}
2913 
2914 	pch = pci_get_drvdata(bus->dev);
2915 	if (pch == NULL) {
2916 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2917 		return -EINVAL;
2918 	}
2919 
2920 	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2921 	if (dhdpcie_osinfo->oob_irq_registered) {
2922 		DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
2923 		return -EBUSY;
2924 	}
2925 
2926 	if (dhdpcie_osinfo->oob_irq_num > 0) {
2927 		printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__,
2928 			(int)dhdpcie_osinfo->oob_irq_num,
2929 			(int)dhdpcie_osinfo->oob_irq_flags);
2930 #ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
2931 		err = request_threaded_irq(dhdpcie_osinfo->oob_irq_num,
2932 			wlan_oob_irq_isr, wlan_oob_irq,
2933 			dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
2934 			bus);
2935 #else
2936 		err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
2937 			dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
2938 			bus);
2939 #endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
2940 		if (err) {
2941 			DHD_ERROR(("%s: request_irq failed with %d\n",
2942 				__FUNCTION__, err));
2943 			return err;
2944 		}
2945 #if defined(DISABLE_WOWLAN)
2946 		printf("%s: disable_irq_wake\n", __FUNCTION__);
2947 		dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
2948 #else
2949 		printf("%s: enable_irq_wake\n", __FUNCTION__);
2950 		err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
2951 		if (!err) {
2952 			dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
2953 		} else
2954 			printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err);
2955 #endif /* DISABLE_WOWLAN */
2956 		dhdpcie_osinfo->oob_irq_enabled = TRUE;
2957 	}
2958 
2959 	dhdpcie_osinfo->oob_irq_registered = TRUE;
2960 
2961 	return 0;
2962 }
2963 
2964 void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
2965 {
2966 	int err = 0;
2967 	dhdpcie_info_t *pch;
2968 	dhdpcie_os_info_t *dhdpcie_osinfo;
2969 
2970 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2971 	if (bus == NULL) {
2972 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2973 		return;
2974 	}
2975 
2976 	if (bus->dev == NULL) {
2977 		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2978 		return;
2979 	}
2980 
2981 	pch = pci_get_drvdata(bus->dev);
2982 	if (pch == NULL) {
2983 		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2984 		return;
2985 	}
2986 
2987 	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2988 	if (!dhdpcie_osinfo->oob_irq_registered) {
2989 		DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
2990 		return;
2991 	}
2992 	if (dhdpcie_osinfo->oob_irq_num > 0) {
2993 		if (dhdpcie_osinfo->oob_irq_wake_enabled) {
2994 			err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
2995 			if (!err) {
2996 				dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
2997 			}
2998 		}
2999 		if (dhdpcie_osinfo->oob_irq_enabled) {
3000 			disable_irq(dhdpcie_osinfo->oob_irq_num);
3001 			dhdpcie_osinfo->oob_irq_enabled = FALSE;
3002 		}
3003 		free_irq(dhdpcie_osinfo->oob_irq_num, bus);
3004 	}
3005 	dhdpcie_osinfo->oob_irq_registered = FALSE;
3006 }
3007 #endif /* BCMPCIE_OOB_HOST_WAKE */
3008 
3009 #ifdef PCIE_OOB
3010 void dhdpcie_oob_init(dhd_bus_t *bus)
3011 {
3012 	/* XXX this should be passed in as a command line parameter */
3013 	gpio_handle_val = get_handle(OOB_PORT);
3014 	if (gpio_handle_val < 0)
3015 	{
3016 		DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
3017 		ASSERT(FALSE);
3018 	}
3019 
3020 	gpio_direction = 0;
3021 	ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
3022 
3023 	/* Note BT core is also enabled here */
3024 	gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
3025 	gpio_write_port(gpio_handle_val, gpio_port);
3026 
3027 	gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
3028 	ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
3029 
3030 	bus->oob_enabled = TRUE;
3031 	bus->oob_presuspend = FALSE;
3032 
3033 	/* drive the Device_Wake GPIO low on startup */
3034 	bus->device_wake_state = TRUE;
3035 	dhd_bus_set_device_wake(bus, FALSE);
3036 	dhd_bus_doorbell_timeout_reset(bus);
3037 
3038 }
3039 
3040 void
3041 dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
3042 {
3043 	DHD_INFO(("Set BT_REG_ON to %d\n", val));
3044 	if (val)
3045 	{
3046 		gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
3047 		gpio_write_port(gpio_handle_val, gpio_port);
3048 	} else {
3049 		gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
3050 		gpio_write_port(gpio_handle_val, gpio_port);
3051 	}
3052 }
3053 
3054 int
3055 dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
3056 {
3057 	int ret;
3058 	uint8 val;
3059 	ret = gpio_read_port(gpio_handle_val, &val);
3060 
3061 	if (ret < 0) {
3062 		/* XXX handle error properly */
3063 		DHD_ERROR(("gpio_read_port returns %d\n", ret));
3064 		return ret;
3065 	}
3066 
3067 	if (val & (1 << BIT_BT_REG_ON))
3068 	{
3069 		ret = 1;
3070 	} else {
3071 		ret = 0;
3072 	}
3073 
3074 	return ret;
3075 }
3076 
3077 int
3078 dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val)
3079 {
3080 	if (bus->device_wake_state != val)
3081 	{
3082 		DHD_INFO(("Set Device_Wake to %d\n", val));
3083 
3084 		if (bus->oob_enabled && !bus->oob_presuspend)
3085 		{
3086 			if (val)
3087 			{
3088 				gpio_port = gpio_port | (1 << DEVICE_WAKE);
3089 				gpio_write_port_non_block(gpio_handle_val, gpio_port);
3090 			} else {
3091 				gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
3092 				gpio_write_port_non_block(gpio_handle_val, gpio_port);
3093 			}
3094 		}
3095 
3096 		bus->device_wake_state = val;
3097 	}
3098 	return BCME_OK;
3099 }
3100 
3101 INLINE void
3102 dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val)
3103 {
3104 	/* TODO: An in-band implementation of Device_Wake is not currently
3105 	 * supported; this function is left empty for later use.
3106 	 */
3107 }
3108 #endif /* PCIE_OOB */
3109 
3110 #ifdef DHD_PCIE_RUNTIMEPM
3111 bool dhd_runtimepm_state(dhd_pub_t *dhd)
3112 {
3113 	dhd_bus_t *bus;
3114 	unsigned long flags;
3115 	bus = dhd->bus;
3116 
3117 	DHD_GENERAL_LOCK(dhd, flags);
3118 	bus->idlecount++;
3119 
3120 	DHD_TRACE(("%s : Enter \n", __FUNCTION__));
3121 
3122 	if (dhd_query_bus_erros(dhd)) {
3123 		/* Because of a bus error, dongle trap, etc.,
3124 		 * the driver does not allow entering suspend; return FALSE.
3125 		 */
3126 		DHD_GENERAL_UNLOCK(dhd, flags);
3127 		return FALSE;
3128 	}
3129 
3130 	if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
3131 		bus->idlecount = 0;
3132 		if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) &&
3133 			!DHD_CHECK_CFG_IN_PROGRESS(dhd) && !dhd_os_check_wakelock_all(bus->dhd)) {
3134 			DHD_ERROR(("%s: DHD Idle state!! -  idletime :%d, wdtick :%d \n",
3135 					__FUNCTION__, bus->idletime, dhd_runtimepm_ms));
3136 			bus->bus_wake = 0;
3137 			DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
3138 			bus->runtime_resume_done = FALSE;
3139 			/* stop all interface network queue. */
3140 			dhd_bus_stop_queue(bus);
3141 			DHD_GENERAL_UNLOCK(dhd, flags);
3142 			/* If RPM suspend fails, return FALSE so it is retried */
3143 			if (dhdpcie_set_suspend_resume(bus, TRUE)) {
3144 				DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
3145 				DHD_GENERAL_LOCK(dhd, flags);
3146 				DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
3147 				dhd_os_busbusy_wake(bus->dhd);
3148 				bus->runtime_resume_done = TRUE;
3149 				/* Without this, the NET TX queue can get stuck */
3150 				dhd_bus_start_queue(bus);
3151 				DHD_GENERAL_UNLOCK(dhd, flags);
3152 				if (bus->dhd->rx_pending_due_to_rpm) {
3153 					/* Reschedule tasklet to process Rx frames */
3154 					DHD_ERROR(("%s: Schedule DPC to process pending"
3155 						" Rx packets\n", __FUNCTION__));
3156 					/* irq will be enabled at the end of dpc */
3157 					dhd_schedule_delayed_dpc_on_dpc_cpu(bus->dhd, 0);
3158 				} else {
3159 					/* enabling host irq deferred from system suspend */
3160 					if (dhdpcie_irq_disabled(bus)) {
3161 						dhdpcie_enable_irq(bus);
3162 						/* increase the interrupt count when it is enabled */
3163 						bus->resume_intr_enable_count++;
3164 					}
3165 				}
3166 				smp_wmb();
3167 				wake_up(&bus->rpm_queue);
3168 				return FALSE;
3169 			}
3170 
3171 			DHD_GENERAL_LOCK(dhd, flags);
3172 			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
3173 			DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd);
3174 			/* Make sure the NET TX queue is active */
3175 			dhd_bus_start_queue(bus);
3176 			DHD_GENERAL_UNLOCK(dhd, flags);
3177 
3178 			wait_event(bus->rpm_queue, bus->bus_wake);
3179 
3180 			DHD_GENERAL_LOCK(dhd, flags);
3181 			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd);
3182 			DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd);
3183 			DHD_GENERAL_UNLOCK(dhd, flags);
3184 
3185 			dhdpcie_set_suspend_resume(bus, FALSE);
3186 
3187 			DHD_GENERAL_LOCK(dhd, flags);
3188 			DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd);
3189 			dhd_os_busbusy_wake(bus->dhd);
3190 			/* Inform the wake up context that Resume is over */
3191 			bus->runtime_resume_done = TRUE;
3192 			/* Make sure the NET TX queue is active */
3193 			dhd_bus_start_queue(bus);
3194 			DHD_GENERAL_UNLOCK(dhd, flags);
3195 
3196 			if (bus->dhd->rx_pending_due_to_rpm) {
3197 				/* Reschedule tasklet to process Rx frames */
3198 				DHD_ERROR(("%s: Schedule DPC to process pending Rx packets\n",
3199 					__FUNCTION__));
3200 				bus->rpm_sched_dpc_time = OSL_LOCALTIME_NS();
3201 				dhd_sched_dpc(bus->dhd);
3202 			}
3203 
3204 			/* enabling host irq deferred from system suspend */
3205 			if (dhdpcie_irq_disabled(bus)) {
3206 				dhdpcie_enable_irq(bus);
3207 				/* increase the interrupt count when it is enabled */
3208 				bus->resume_intr_enable_count++;
3209 			}
3210 
3211 			smp_wmb();
3212 			wake_up(&bus->rpm_queue);
3213 			DHD_ERROR(("%s : runtime resume ended \n", __FUNCTION__));
3214 			return TRUE;
3215 		} else {
3216 			DHD_GENERAL_UNLOCK(dhd, flags);
3217 			/* Since one of the contexts is busy (TX, IOVAR or RX),
3218 			 * we should not suspend
3219 			 */
3220 			DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
3221 				__FUNCTION__, dhd->dhd_bus_busy_state));
3222 			return FALSE;
3223 		}
3224 	}
3225 
3226 	DHD_GENERAL_UNLOCK(dhd, flags);
3227 	return FALSE;
3228 } /* dhd_runtimepm_state */
3229 
3230 /*
3231  * dhd_runtime_bus_wake
3232  *  TRUE  - the wake-up was handled in the runtime-PM context
3233  *  FALSE - runtime PM was not involved
3234  */
3235 bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
3236 {
3237 	unsigned long flags;
3238 	bus->idlecount = 0;
3239 	DHD_TRACE(("%s : enter\n", __FUNCTION__));
3240 	if (bus->dhd->up == FALSE) {
3241 		DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
3242 		return FALSE;
3243 	}
3244 
3245 	DHD_GENERAL_LOCK(bus->dhd, flags);
3246 	if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) {
3247 		/* Wake up the RPM state thread if suspend is in progress or already done */
3248 		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) ||
3249 				DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
3250 			bus->bus_wake = 1;
3251 
3252 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
3253 
3254 			DHD_ERROR_RLMT(("Runtime Resume is called in %pf\n", func_addr));
3255 			smp_wmb();
3256 			wake_up(&bus->rpm_queue);
3257 		/* No need to wake up the RPM state thread */
3258 		} else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
3259 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
3260 		}
3261 
3262 		/* If wait is TRUE, the caller waits here until resume is done */
3263 		if (wait) {
3264 			if (!wait_event_timeout(bus->rpm_queue, bus->runtime_resume_done,
3265 					msecs_to_jiffies(RPM_WAKE_UP_TIMEOUT))) {
3266 				DHD_ERROR(("%s: RPM_WAKE_UP_TIMEOUT error\n", __FUNCTION__));
3267 				return FALSE;
3268 			}
3269 		} else {
3270 			DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
3271 		}
3272 		/* If it is called from RPM context, it returns TRUE */
3273 		return TRUE;
3274 	}
3275 
3276 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
3277 
3278 	return FALSE;
3279 }
3280 
3281 bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr)
3282 {
3283 	dhd_bus_t *bus = dhdp->bus;
3284 	return dhd_runtime_bus_wake(bus, wait, func_addr);
3285 }
3286 
3287 void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
3288 {
3289 	dhd_bus_t *bus = dhdp->bus;
3290 	bus->idletime = 0;
3291 }
3292 
3293 bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
3294 {
3295 	dhd_bus_t *bus = dhdp->bus;
3296 	return bus->runtime_resume_done;
3297 }
3298 #endif /* DHD_PCIE_RUNTIMEPM */
3299 
3300 struct device *dhd_bus_to_dev(dhd_bus_t *bus)
3301 {
3302 	struct pci_dev *pdev;
3303 	pdev = bus->dev;
3304 
3305 	if (pdev)
3306 		return &pdev->dev;
3307 	else
3308 		return NULL;
3309 }
3310 
3311 #ifdef DHD_FW_COREDUMP
3312 int
3313 dhd_dongle_mem_dump(void)
3314 {
3315 	if (!g_dhd_bus) {
3316 		DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
3317 		return -ENODEV;
3318 	}
3319 
3320 	dhd_bus_dump_console_buffer(g_dhd_bus);
3321 	dhd_prot_debug_info_print(g_dhd_bus->dhd);
3322 
3323 	g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
3324 	g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
3325 
3326 #ifdef DHD_PCIE_RUNTIMEPM
3327 	dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
3328 #endif /* DHD_PCIE_RUNTIMEPM */
3329 
3330 	dhd_bus_mem_dump(g_dhd_bus->dhd);
3331 	return 0;
3332 }
3333 EXPORT_SYMBOL(dhd_dongle_mem_dump);
3334 #endif /* DHD_FW_COREDUMP */
3335 
3336 #ifdef CONFIG_ARCH_MSM
3337 void
3338 dhd_bus_inform_ep_loaded_to_rc(dhd_pub_t *dhdp, bool up)
3339 {
3340 	sec_pcie_set_ep_driver_loaded(dhdp->bus->rc_dev, up);
3341 }
3342 #endif /* CONFIG_ARCH_MSM */
3343 
3344 bool
3345 dhd_bus_check_driver_up(void)
3346 {
3347 	dhd_bus_t *bus;
3348 	dhd_pub_t *dhdp;
3349 	bool isup = FALSE;
3350 
3351 	bus = (dhd_bus_t *)g_dhd_bus;
3352 	if (!bus) {
3353 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3354 		return isup;
3355 	}
3356 
3357 	dhdp = bus->dhd;
3358 	if (dhdp) {
3359 		isup = dhdp->up;
3360 	}
3361 
3362 	return isup;
3363 }
3364 EXPORT_SYMBOL(dhd_bus_check_driver_up);
3365