• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Linux DHD Bus Module for PCIE
3  *
4  * Copyright (C) 1999-2019, Broadcom.
5  *
6  *      Unless you and Broadcom execute a separate written software license
7  * agreement governing use of this software, this software is licensed to you
8  * under the terms of the GNU General Public License version 2 (the "GPL"),
9  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10  * following added to such license:
11  *
12  *      As a special exception, the copyright holders of this software give you
13  * permission to link this software with independent modules, and to copy and
14  * distribute the resulting executable under terms of your choice, provided that
15  * you also meet, for each linked independent module, the terms and conditions
16  * of the license of that module.  An independent module is a module which is
17  * not derived from this software.  The special exception does not apply to any
18  * modifications of the software.
19  *
20  *      Notwithstanding the above, under no circumstances may you combine this
21  * software in any way with any other Broadcom software provided under a license
22  * other than the GPL, without Broadcom's express prior written consent.
23  *
24  *
25  * <<Broadcom-WL-IPTag/Open:>>
26  *
27  * $Id: dhd_pcie_linux.c 821650 2019-05-24 10:41:54Z $
28  */
29 
30 /* include files */
31 #include <typedefs.h>
32 #include <bcmutils.h>
33 #include <bcmdevs.h>
34 #include <siutils.h>
35 #include <hndsoc.h>
36 #include <hndpmu.h>
37 #include <sbchipc.h>
38 #if defined(DHD_DEBUG)
39 #include <hnd_armtrap.h>
40 #include <hnd_cons.h>
41 #endif /* defined(DHD_DEBUG) */
42 #include <dngl_stats.h>
43 #include <pcie_core.h>
44 #include <dhd.h>
45 #include <dhd_bus.h>
46 #include <dhd_proto.h>
47 #include <dhd_dbg.h>
48 #include <dhdioctl.h>
49 #include <bcmmsgbuf.h>
50 #include <pcicfg.h>
51 #include <dhd_pcie.h>
52 #include <dhd_linux.h>
53 #ifdef CONFIG_ARCH_MSM
54 #if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
55 #include <linux/msm_pcie.h>
56 #else
57 #include <mach/msm_pcie.h>
58 #endif /* CONFIG_PCI_MSM */
59 #endif /* CONFIG_ARCH_MSM */
60 
61 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
62 #include <linux/pm_runtime.h>
63 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
64 
65 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66 #ifndef AUTO_SUSPEND_TIMEOUT
67 #define AUTO_SUSPEND_TIMEOUT 1000
68 #endif /* AUTO_SUSPEND_TIMEOUT */
69 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
70 
71 #include <linux/irq.h>
72 #ifdef USE_SMMU_ARCH_MSM
73 #include <asm/dma-iommu.h>
74 #include <linux/iommu.h>
75 #include <linux/of.h>
76 #include <linux/platform_device.h>
77 #endif /* USE_SMMU_ARCH_MSM */
78 #include <dhd_config.h>
79 
80 #define PCI_CFG_RETRY 10
81 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
82 #define BCM_MEM_FILENAME_LEN 24    /* Mem. filename length */
83 
84 #ifdef FORCE_TPOWERON
85 extern uint32 tpoweron_scale;
86 #endif /* FORCE_TPOWERON */
87 /* user defined data structures  */
88 
89 typedef bool (*dhdpcie_cb_fn_t)(void *);
90 
/*
 * Per-PCI-device Linux-side context for the DHD PCIe bus.
 * Allocated at probe time and stored as the pci_dev's drvdata; ties the
 * generic dhd_bus_t to the Linux pci_dev, mapped BARs and saved PCI state.
 */
typedef struct dhdpcie_info {
    dhd_bus_t *bus;       /* generic DHD bus handle */
    osl_t *osh;           /* OS abstraction layer handle */
    struct pci_dev *dev;  /* pci device handle */
    volatile char *regs;  /* pci device memory va */
    volatile char *tcm;   /* pci device memory va */
    uint32 bar1_size;     /* pci device memory size */
    uint32 curr_bar1_win; /* current PCIEBar1Window setting */
    struct pcos_info *pcos_info;
    uint16 last_intrstatus; /* to cache intrstatus */
    int irq;                /* assigned PCI IRQ number */
    char pciname[32];       /* device name used for IRQ registration */
    struct pci_saved_state *default_state; /* config space saved at probe */
    struct pci_saved_state *state;         /* config space saved at suspend */
#ifdef BCMPCIE_OOB_HOST_WAKE
    void *os_cxt; /* Pointer to per-OS private data */
#endif            /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_WAKE_STATUS
    spinlock_t pcie_lock;          /* protects the wake counters below */
    unsigned int total_wake_count; /* cumulative host-wake events */
    int pkt_wake;                  /* last wake was packet-triggered */
    int wake_irq;                  /* IRQ used for host wakeup */
#endif /* DHD_WAKE_STATUS */
#ifdef USE_SMMU_ARCH_MSM
    void *smmu_cxt; /* dhdpcie_smmu_info_t, see dhdpcie_smmu_init() */
#endif /* USE_SMMU_ARCH_MSM */
} dhdpcie_info_t;
118 
/*
 * OS-private helper state hanging off dhdpcie_info_t: interrupt wait queue
 * plus a timer/tasklet pair used for link tuning.
 */
struct pcos_info {
    dhdpcie_info_t *pc; /* back-pointer to owning context */
    spinlock_t lock;
    wait_queue_head_t intr_wait_queue;
    struct timer_list tuning_timer;
    int tuning_timer_exp;  /* timer expiry (remaining periods) */
    atomic_t timer_enab;   /* nonzero while the tuning timer is armed */
    struct tasklet_struct tuning_tasklet;
};
128 
129 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Out-of-band host-wake IRQ bookkeeping (hardware or software OOB). */
typedef struct dhdpcie_os_info {
    int oob_irq_num; /* valid when hardware or software oob in use */
    unsigned long
        oob_irq_flags; /* valid when hardware or software oob in use */
    bool oob_irq_registered;   /* request_irq() done */
    bool oob_irq_enabled;      /* IRQ currently unmasked */
    bool oob_irq_wake_enabled; /* IRQ marked as a wakeup source */
    spinlock_t oob_irq_spinlock; /* serializes enable/disable transitions */
    void *dev; /* handle to the underlying device */
} dhdpcie_os_info_t;
140 static irqreturn_t wlan_oob_irq(int irq, void *data);
141 #ifdef CUSTOMER_HW2
142 extern struct brcm_pcie_wake brcm_pcie_wake;
143 #endif /* CUSTOMER_HW2 */
144 #endif /* BCMPCIE_OOB_HOST_WAKE */
145 
146 #ifdef USE_SMMU_ARCH_MSM
147 typedef struct dhdpcie_smmu_info {
148     struct dma_iommu_mapping *smmu_mapping;
149     dma_addr_t smmu_iova_start;
150     size_t smmu_iova_len;
151 } dhdpcie_smmu_info_t;
152 #endif /* USE_SMMU_ARCH_MSM */
153 
154 /* function declarations */
155 static int __devinit dhdpcie_pci_probe(struct pci_dev *pdev,
156                                        const struct pci_device_id *ent);
157 static void __devexit dhdpcie_pci_remove(struct pci_dev *pdev);
158 static int dhdpcie_init(struct pci_dev *pdev);
159 static irqreturn_t dhdpcie_isr(int irq, void *arg);
160 /* OS Routine functions for PCI suspend/resume */
161 
162 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
163 static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state,
164                                       bool byint);
165 #else
166 static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
167 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
168 static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
169 static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
170 static int dhdpcie_resume_dev(struct pci_dev *dev);
171 static int dhdpcie_suspend_dev(struct pci_dev *dev);
172 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
173 static int dhdpcie_pm_system_suspend_noirq(struct device *dev);
174 static int dhdpcie_pm_system_resume_noirq(struct device *dev);
175 #else
176 static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
177 static int dhdpcie_pci_resume(struct pci_dev *dev);
178 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
179 
180 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
181 static int dhdpcie_pm_runtime_suspend(struct device *dev);
182 static int dhdpcie_pm_runtime_resume(struct device *dev);
183 static int dhdpcie_pm_system_suspend_noirq(struct device *dev);
184 static int dhdpcie_pm_system_resume_noirq(struct device *dev);
185 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
186 
187 static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);
188 
189 uint32 dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset,
190                           bool is_ext, bool is_write, uint32 writeval);
191 
/*
 * PCI match table: bind to any Broadcom (0x14e4) device in the
 * "network controller, other" class; terminated by the all-zero entry.
 */
static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
    {
        vendor : 0x14e4,
        device : PCI_ANY_ID,
        subvendor : PCI_ANY_ID,
        subdevice : PCI_ANY_ID,
        class : PCI_CLASS_NETWORK_OTHER << 8,
        class_mask : 0xffff00,
        driver_data : 0,
    },
    {0, 0, 0, 0, 0, 0, 0}};
MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
204 
205 /* Power Management Hooks */
206 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
/*
 * Runtime-PM + system noirq suspend/resume callbacks, installed via
 * dhdpcie_driver.driver.pm when DHD_PCIE_NATIVE_RUNTIMEPM is enabled.
 */
static const struct dev_pm_ops dhdpcie_pm_ops = {
    SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume,
                       NULL)
        .suspend_noirq = dhdpcie_pm_system_suspend_noirq,
    .resume_noirq = dhdpcie_pm_system_resume_noirq};
212 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
213 
214 static struct pci_driver dhdpcie_driver = {
215     node : {&dhdpcie_driver.node, &dhdpcie_driver.node},
216     name : "pcieh",
217     id_table : dhdpcie_pci_devid,
218     probe : dhdpcie_pci_probe,
219     remove : dhdpcie_pci_remove,
220 #if defined(DHD_PCIE_NATIVE_RUNTIMEPM)
221     .driver.pm = &dhd_pcie_pm_ops,
222 #else
223     suspend : dhdpcie_pci_suspend,
224     resume : dhdpcie_pci_resume,
225 #endif // endif
226 };
227 
228 int dhdpcie_init_succeeded = FALSE;
229 
230 #ifdef USE_SMMU_ARCH_MSM
dhdpcie_smmu_init(struct pci_dev * pdev,void * smmu_cxt)231 static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
232 {
233     struct dma_iommu_mapping *mapping;
234     struct device_node *root_node = NULL;
235     dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
236     int smmu_iova_address[2];
237     char *wlan_node = "android,bcmdhd_wlan";
238     char *wlan_smmu_node = "wlan-smmu-iova-address";
239     int atomic_ctx = 1;
240     int s1_bypass = 1;
241     int ret = 0;
242 
243     DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));
244 
245     root_node = of_find_compatible_node(NULL, NULL, wlan_node);
246     if (!root_node) {
247         WARN(1, "failed to get device node of BRCM WLAN\n");
248         return -ENODEV;
249     }
250 
251     if (of_property_read_u32_array(root_node, wlan_smmu_node, smmu_iova_address,
252                                    0x2) == 0) {
253         DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
254                    __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
255         smmu_info->smmu_iova_start = smmu_iova_address[0];
256         smmu_info->smmu_iova_len = smmu_iova_address[1];
257     } else {
258         printf("%s : can't get smmu iova address property\n", __FUNCTION__);
259         return -ENODEV;
260     }
261 
262     if (smmu_info->smmu_iova_len <= 0) {
263         DHD_ERROR(("%s: Invalid smmu iova len %d\n", __FUNCTION__,
264                    (int)smmu_info->smmu_iova_len));
265         return -EINVAL;
266     }
267 
268     DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
269 
270     if (pci_set_dma_mask(pdev, DMA_BIT_MASK(0x40)) ||
271         pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(0x40))) {
272         DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
273         return -EINVAL;
274     }
275 
276     mapping =
277         arm_iommu_create_mapping(&platform_bus_type, smmu_info->smmu_iova_start,
278                                  smmu_info->smmu_iova_len);
279     if (IS_ERR(mapping)) {
280         DHD_ERROR(("%s: create mapping failed, err = %d\n", __FUNCTION__, ret));
281         ret = PTR_ERR(mapping);
282         goto map_fail;
283     }
284 
285     ret =
286         iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx);
287     if (ret) {
288         DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
289                    __FUNCTION__, ret));
290         goto set_attr_fail;
291     }
292 
293     ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
294                                 &s1_bypass);
295     if (ret < 0) {
296         DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
297                    __FUNCTION__, ret));
298         goto set_attr_fail;
299     }
300 
301     ret = arm_iommu_attach_device(&pdev->dev, mapping);
302     if (ret) {
303         DHD_ERROR(("%s: attach device failed, err = %d\n", __FUNCTION__, ret));
304         goto attach_fail;
305     }
306 
307     smmu_info->smmu_mapping = mapping;
308 
309     return ret;
310 
311 attach_fail:
312 set_attr_fail:
313     arm_iommu_release_mapping(mapping);
314 map_fail:
315     return ret;
316 }
317 
dhdpcie_smmu_remove(struct pci_dev * pdev,void * smmu_cxt)318 static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
319 {
320     dhdpcie_smmu_info_t *smmu_info;
321 
322     if (!smmu_cxt) {
323         return;
324     }
325 
326     smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
327     if (smmu_info->smmu_mapping) {
328         arm_iommu_detach_device(&pdev->dev);
329         arm_iommu_release_mapping(smmu_info->smmu_mapping);
330         smmu_info->smmu_mapping = NULL;
331     }
332 }
333 #endif /* USE_SMMU_ARCH_MSM */
334 
335 #ifdef FORCE_TPOWERON
/*
 * Read back and log the L1SS CONTROL2 (tPowerOn) register of both the
 * root complex and the endpoint. Diagnostic only; no state is modified.
 */
static void dhd_bus_get_tpoweron(dhd_bus_t *bus)
{
    uint32 rc_val = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
                                          PCIE_EXTCAP_L1SS_CONTROL2_OFFSET,
                                          TRUE, FALSE, 0);
    uint32 ep_val = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
                                          PCIE_EXTCAP_L1SS_CONTROL2_OFFSET,
                                          TRUE, FALSE, 0);

    DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n", __FUNCTION__,
               rc_val, ep_val));
}
350 
/*
 * Program the L1SS tPowerOn value into both RC and EP CONTROL2 registers.
 * The before/after dhd_bus_get_tpoweron() calls bracket the write with
 * log output so the change is visible in the kernel log.
 */
static void dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
{
    dhd_bus_get_tpoweron(bus);
    /* Set the tpoweron */
    DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
    dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
                          PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE,
                          tpoweron);
    dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
                          PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE,
                          tpoweron);

    dhd_bus_get_tpoweron(bus);
}
365 
dhdpcie_chip_req_forced_tpoweron(dhd_bus_t * bus)366 static bool dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
367 {
368     /*
369      * On Fire's reference platform, coming out of L1.2,
370      * there is a constant delay of 45us between CLKREQ# and stable REFCLK
371      * Due to this delay, with tPowerOn < 50
372      * there is a chance of the refclk sense to trigger on noise.
373      *
374      * Which ever chip needs forced tPowerOn of 50us should be listed below.
375      */
376     if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
377         return TRUE;
378     }
379     return FALSE;
380 }
381 #endif /* FORCE_TPOWERON */
382 
/*
 * Enable (L1 only) or fully disable ASPM in a device's PCIe Link Control
 * register. `dev` may be either the endpoint (bus->dev) or the root
 * complex; the log output is tagged accordingly.
 *
 * Returns TRUE if the register was changed, FALSE if it was already in
 * the requested state.
 */
static bool dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev,
                                    bool enable)
{
    uint32 before;
    uint32 after = 0;
    uint8 aspm_bits;
    char *which;

    which = (dev == bus->dev) ? "EP" : "RC";

    before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
                                FALSE, FALSE, 0);
    aspm_bits = (before & PCIE_ASPM_CTRL_MASK);

    /* Bail out early when the link is already configured as requested. */
    if (enable && aspm_bits == PCIE_ASPM_L1_ENAB) {
        DHD_ERROR(("%s: %s already enabled  linkctrl: 0x%x\n", __FUNCTION__,
                   which, before));
        return FALSE;
    }
    if (!enable && aspm_bits == 0) {
        DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n", __FUNCTION__,
                   which, before));
        return FALSE;
    }

    if (enable) {
        /* Enable only L1 ASPM (bit 1) */
        dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
                           FALSE, TRUE, (before | PCIE_ASPM_L1_ENAB));
    } else {
        /* Disable complete ASPM (bit 1 and bit 0) */
        dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
                           FALSE, TRUE, (before & (~PCIE_ASPM_ENAB)));
    }

    after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
                               FALSE, FALSE, 0);
    DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
               __FUNCTION__, which, (enable ? "ENABLE " : "DISABLE"),
               before, after));

    return TRUE;
}
425 
dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t * bus)426 static bool dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
427 {
428     uint32 rc_aspm_cap;
429     uint32 ep_aspm_cap;
430 
431     /* RC ASPM capability */
432     rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP,
433                                      PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0);
434     if (rc_aspm_cap == BCME_ERROR) {
435         DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
436         return FALSE;
437     }
438 
439     /* EP ASPM capability */
440     ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP,
441                                      PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0);
442     if (ep_aspm_cap == BCME_ERROR) {
443         DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
444         return FALSE;
445     }
446 
447     return TRUE;
448 }
449 
/*
 * Enable or disable ASPM on both ends of the link, in the order the PCIe
 * spec expects: RC first then EP when enabling, EP first then RC when
 * disabling.
 *
 * Returns whether the *last* device touched actually changed state (the
 * result of the first dhd_bus_aspm_enable_dev() call is deliberately
 * overwritten); FALSE when the bus pair is not ASPM capable.
 */
bool dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
{
    bool ret;

    if (!bus->rc_ep_aspm_cap) {
        DHD_ERROR(("%s: NOT ASPM  CAPABLE rc_ep_aspm_cap: %d\n", __FUNCTION__,
                   bus->rc_ep_aspm_cap));
        return FALSE;
    }

    if (enable) {
        /* Enable only L1 ASPM first RC then EP */
        ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
        ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
    } else {
        /* Disable complete ASPM first EP then RC */
        ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
        ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
    }

    return ret;
}
472 
/*
 * Enable or disable the L1 substates (L1SS) control bits in a device's
 * extended capability space. `dev` may be the endpoint (bus->dev) or the
 * root complex; log output is tagged accordingly. No-op (with a log line)
 * when the device is already in the requested state.
 */
static void dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev,
                                    bool enable)
{
    uint32 before;
    uint32 after = 0;
    uint8 l1ss_bits;
    char *which;

    which = (dev == bus->dev) ? "EP" : "RC";

    /* Read current L1SS control from the extended capability block. */
    before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
                                PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE,
                                0);
    l1ss_bits = (before & PCIE_EXT_L1SS_MASK);

    if (enable) {
        if (l1ss_bits == PCIE_EXT_L1SS_ENAB) {
            DHD_ERROR(("%s: %s already enabled,  l1ssctrl: 0x%x\n",
                       __FUNCTION__, which, before));
            return;
        }
        dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
                           PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, TRUE,
                           (before | PCIE_EXT_L1SS_ENAB));
    } else {
        if (l1ss_bits == 0) {
            DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n",
                       __FUNCTION__, which, before));
            return;
        }
        dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
                           PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, TRUE,
                           (before & (~PCIE_EXT_L1SS_ENAB)));
    }

    after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
                               PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE,
                               0);
    DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
               __FUNCTION__, which, (enable ? "ENABLE " : "DISABLE"),
               before, after));
}
515 
dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t * bus)516 static bool dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus)
517 {
518     uint32 rc_l1ss_cap;
519     uint32 ep_l1ss_cap;
520 
521     /* RC Extendend Capacility */
522     rc_l1ss_cap =
523         dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS,
524                            PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
525     if (rc_l1ss_cap == BCME_ERROR) {
526         DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__));
527         return FALSE;
528     }
529 
530     /* EP Extendend Capacility */
531     ep_l1ss_cap =
532         dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS,
533                            PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
534     if (ep_l1ss_cap == BCME_ERROR) {
535         DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__));
536         return FALSE;
537     }
538 
539     return TRUE;
540 }
541 
/*
 * Enable or disable L1 substates on both ends of the link. ASPM must be
 * off while the L1SS control bits are reprogrammed, so this routine
 * disables ASPM first and re-enables it afterwards — but only if it was
 * this call that disabled it (ret == TRUE), to avoid turning ASPM on
 * behind the back of a caller that had it off already.
 */
void dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
{
    bool ret;

    if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) {
        DHD_ERROR(
            ("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
             __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
        return;
    }

    /* Disable ASPM of RC and EP */
    ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE);

    if (enable) {
        /* Enable RC then EP */
        dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
        dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
    } else {
        /* Disable EP then RC */
        dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
        dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
    }

    /* Enable ASPM of RC and EP only if this API disabled */
    if (ret == TRUE) {
        dhd_bus_aspm_enable_rc_ep(bus, TRUE);
    }
}
571 
/*
 * Mask the Advisory-Non-Fatal bit (CORR_ERR_AE) in the AER correctable
 * error mask of both endpoint and root complex, so advisory errors stop
 * flooding the error logs. A read of all-ones means the capability is
 * not accessible; the write is skipped and an error is logged.
 */
void dhd_bus_aer_config(dhd_bus_t *bus)
{
    uint32 mask_reg;

    DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
    mask_reg = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
                                     PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE,
                                     FALSE, 0);
    if (mask_reg == (uint32)-1) {
        DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
                   __FUNCTION__, mask_reg));
    } else {
        mask_reg &= ~CORR_ERR_AE;
        dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
                              PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE,
                              mask_reg);
    }

    DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
    mask_reg = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
                                     PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE,
                                     FALSE, 0);
    if (mask_reg == (uint32)-1) {
        DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
                   __FUNCTION__, mask_reg));
    } else {
        mask_reg &= ~CORR_ERR_AE;
        dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
                              PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE,
                              mask_reg);
    }
}
600 
/*
 * Legacy (non-runtime-PM) PCI suspend entry point.
 * Waits up to ~100 ms for the DHD bus to go idle, marks the bus as
 * suspend-in-progress, and delegates to dhdpcie_set_suspend_resume().
 * Returns 0 on success, -EBUSY if the bus never went idle.
 */
static int dhdpcie_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
    /* Idle-wait poll step in microseconds (= 1 ms) and the maximum number
     * of polls; previously the magic literals 0x3E8 and 0x64. */
    enum { DHD_IDLE_POLL_STEP_US = 1000, DHD_IDLE_POLL_LIMIT = 100 };
    int ret = 0;
    dhdpcie_info_t *pch = pci_get_drvdata(pdev);
    dhd_bus_t *bus = NULL;
    unsigned long flags;
    uint32 i = 0;

    if (pch) {
        bus = pch->bus;
    }
    if (!bus) {
        return ret;
    }

    BCM_REFERENCE(state);

    if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
        DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
                   __FUNCTION__, bus->dhd->dhd_bus_busy_state));

        OSL_DELAY(DHD_IDLE_POLL_STEP_US);
        /* retry till the transaction is complete */
        while (i < DHD_IDLE_POLL_LIMIT) {
            OSL_DELAY(DHD_IDLE_POLL_STEP_US);
            i++;
            if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
                DHD_ERROR(
                    ("%s: Bus enter IDLE!! after %d ms\n", __FUNCTION__, i));
                break;
            }
        }
        if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
            DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, "
                       "dhd_bus_busy_state = 0x%x\n",
                       __FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
            return -EBUSY;
        }
    }
    DHD_GENERAL_LOCK(bus->dhd, flags);
    DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
    DHD_GENERAL_UNLOCK(bus->dhd, flags);

    /* Skip the dongle handshake while the dongle is held in reset. */
    if (!bus->dhd->dongle_reset) {
        ret = dhdpcie_set_suspend_resume(bus, TRUE);
    }

    DHD_GENERAL_LOCK(bus->dhd, flags);
    DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
    dhd_os_busbusy_wake(bus->dhd);
    DHD_GENERAL_UNLOCK(bus->dhd, flags);

    return ret;
}
655 
dhdpcie_pci_resume(struct pci_dev * pdev)656 static int dhdpcie_pci_resume(struct pci_dev *pdev)
657 {
658     int ret = 0;
659     dhdpcie_info_t *pch = pci_get_drvdata(pdev);
660     dhd_bus_t *bus = NULL;
661     unsigned long flags;
662 
663     if (pch) {
664         bus = pch->bus;
665     }
666     if (!bus) {
667         return ret;
668     }
669 
670     DHD_GENERAL_LOCK(bus->dhd, flags);
671     DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
672     DHD_GENERAL_UNLOCK(bus->dhd, flags);
673 
674     if (!bus->dhd->dongle_reset) {
675         ret = dhdpcie_set_suspend_resume(bus, FALSE);
676     }
677 
678     DHD_GENERAL_LOCK(bus->dhd, flags);
679     DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
680     dhd_os_busbusy_wake(bus->dhd);
681     DHD_GENERAL_UNLOCK(bus->dhd, flags);
682 
683     return ret;
684 }
685 
/*
 * Common suspend/resume dispatch: when the firmware is not loaded
 * (busstate == DHD_BUS_DOWN) only the PCI-level suspend/resume is done;
 * otherwise the full bus suspend path runs.
 *
 * NOTE(review): the forward declaration earlier in this file declares the
 * DHD_PCIE_NATIVE_RUNTIMEPM variant as taking a struct pci_dev *, while
 * this definition takes a dhd_bus_t * — and the runtime-PM callers pass
 * pdev. Confirm which prototype is live in that configuration.
 */
static int
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
#else
dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
{
    int ret = 0;

    ASSERT(bus && !bus->dhd->dongle_reset);

    /* When firmware is not loaded do the PCI bus */
    /* suspend/resume only */
    if (bus->dhd->busstate == DHD_BUS_DOWN) {
        ret = dhdpcie_pci_suspend_resume(bus, state);
        return ret;
    }
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
    ret = dhdpcie_bus_suspend(bus, state, byint);
#else
    ret = dhdpcie_bus_suspend(bus, state);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

    return ret;
}
711 
712 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
/*
 * Runtime-PM suspend callback: blocks new bus traffic, stops the netif
 * queue, and attempts an interrupt-driven (byint=TRUE) bus suspend.
 * Returns -EBUSY when no driver data is attached, -EHOSTDOWN when the
 * bus is already blocked, -EAGAIN when the suspend attempt fails (the
 * device is then marked busy so runtime PM retries later).
 */
static int dhdpcie_pm_runtime_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    dhdpcie_info_t *pch = pci_get_drvdata(pdev);
    dhd_bus_t *bus = NULL;
    int ret = 0;

    if (!pch) {
        return -EBUSY;
    }

    bus = pch->bus;

    DHD_RPM(("%s Enter\n", __FUNCTION__));

    if (atomic_read(&bus->dhd->block_bus)) {
        return -EHOSTDOWN;
    }

    dhd_netif_stop_queue(bus);
    atomic_set(&bus->dhd->block_bus, TRUE);

    /* NOTE(review): pdev (struct pci_dev *) is passed where the
     * definition of dhdpcie_set_suspend_resume() in this file takes a
     * dhd_bus_t * — confirm against the declaration used in this config. */
    if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) {
        pm_runtime_mark_last_busy(dev);
        ret = -EAGAIN;
    }

    atomic_set(&bus->dhd->block_bus, FALSE);
    dhd_bus_start_queue(bus);

    return ret;
}
745 
dhdpcie_pm_runtime_resume(struct device * dev)746 static int dhdpcie_pm_runtime_resume(struct device *dev)
747 {
748     struct pci_dev *pdev = to_pci_dev(dev);
749     dhdpcie_info_t *pch = pci_get_drvdata(pdev);
750     dhd_bus_t *bus = pch->bus;
751 
752     DHD_RPM(("%s Enter\n", __FUNCTION__));
753 
754     if (atomic_read(&bus->dhd->block_bus)) {
755         return -EHOSTDOWN;
756     }
757 
758     if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE)) {
759         return -EAGAIN;
760     }
761 
762     return 0;
763 }
764 
/*
 * System-sleep noirq suspend callback: like the runtime variant but with
 * byint=FALSE (polling-based suspend). On failure the netif queue and
 * bus-block flag are rolled back so traffic can continue.
 */
static int dhdpcie_pm_system_suspend_noirq(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    dhdpcie_info_t *pch = pci_get_drvdata(pdev);
    dhd_bus_t *bus = NULL;
    int ret;

    DHD_RPM(("%s Enter\n", __FUNCTION__));

    if (!pch) {
        return -EBUSY;
    }

    bus = pch->bus;

    if (atomic_read(&bus->dhd->block_bus)) {
        return -EHOSTDOWN;
    }

    dhd_netif_stop_queue(bus);
    atomic_set(&bus->dhd->block_bus, TRUE);

    /* NOTE(review): pdev is passed where the local definition of
     * dhdpcie_set_suspend_resume() takes a dhd_bus_t * — confirm. */
    ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE);
    if (ret) {
        /* Suspend failed: undo the traffic block. */
        dhd_bus_start_queue(bus);
        atomic_set(&bus->dhd->block_bus, FALSE);
    }

    return ret;
}
795 
/*
 * System-sleep noirq resume callback: resumes the bus, lifts the traffic
 * block and restarts the netif queue, then refreshes the runtime-PM
 * last-busy timestamp so autosuspend does not fire immediately.
 */
static int dhdpcie_pm_system_resume_noirq(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    dhdpcie_info_t *pch = pci_get_drvdata(pdev);
    dhd_bus_t *bus = NULL;
    int ret;

    if (!pch) {
        return -EBUSY;
    }

    bus = pch->bus;

    DHD_RPM(("%s Enter\n", __FUNCTION__));

    /* NOTE(review): pdev is passed where the local definition of
     * dhdpcie_set_suspend_resume() takes a dhd_bus_t * — confirm. */
    ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE);

    atomic_set(&bus->dhd->block_bus, FALSE);
    dhd_bus_start_queue(bus);
    pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));

    return ret;
}
819 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
820 
821 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
822 extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
823 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
824 
/*
 * Debug helper: dump BAR0/BAR1 and the PM control/status register from
 * PCI config space, tagged with the given suspend-state label.
 */
static void dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus,
                                         char *suspend_state)
{
    DHD_ERROR(
        ("%s: BaseAddress0(0x%x)=0x%x, "
         "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
         suspend_state, PCIECFGREG_BASEADDR0,
         dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
         PCIECFGREG_BASEADDR1,
         dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
         PCIE_CFG_PMCSR,
         dhd_pcie_config_read(bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
}
838 
/*
 * Put the endpoint into D3hot: kill the DPC tasklet, save and stash the
 * PCI config state, arm D0 wake, disable the device and drop the power
 * state. Bails out early (BCME_ERROR) if the PCIe link is already down.
 * The exact ordering of pci_save_state/pci_store_saved_state matters:
 * the saved state is re-loaded in dhdpcie_resume_dev().
 */
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
    int ret;
    dhdpcie_info_t *pch = pci_get_drvdata(dev);
    dhd_bus_t *bus = pch->bus;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
    if (bus->is_linkdown) {
        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
        return BCME_ERROR;
    }
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
    DHD_ERROR(("%s: Enter\n", __FUNCTION__));
    dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
    /* Stop the bottom-half before the device disappears. */
    dhd_dpc_tasklet_kill(bus->dhd);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
    pci_save_state(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
    /* Keep the saved config for restore on resume. */
    pch->state = pci_store_saved_state(dev);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
    pci_enable_wake(dev, PCI_D0, TRUE);
    if (pci_is_enabled(dev)) {
        pci_disable_device(dev);
    }

    ret = pci_set_power_state(dev, PCI_D3hot);
    if (ret) {
        DHD_ERROR(("%s: pci_set_power_state error %d\n", __FUNCTION__, ret));
    }
    dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
    return ret;
}
872 
873 #ifdef DHD_WAKE_STATUS
bcmpcie_get_total_wake(struct dhd_bus * bus)874 int bcmpcie_get_total_wake(struct dhd_bus *bus)
875 {
876     dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
877 
878     return pch->total_wake_count;
879 }
880 
bcmpcie_set_get_wake(struct dhd_bus * bus,int flag)881 int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
882 {
883     dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
884     unsigned long flags;
885     int ret;
886 
887     spin_lock_irqsave(&pch->pcie_lock, flags);
888 
889     ret = pch->pkt_wake;
890     pch->total_wake_count += flag;
891     pch->pkt_wake = flag;
892 
893     spin_unlock_irqrestore(&pch->pcie_lock, flags);
894     return ret;
895 }
896 #endif /* DHD_WAKE_STATUS */
897 
/*
 * Restore the endpoint (dongle) PCI device on resume: reload the config
 * space saved by dhdpcie_suspend_dev(), re-enable the device, re-enable
 * bus mastering and bring the device back to D0.
 *
 * Returns 0 on success or a negative errno from the PCI core.
 */
static int dhdpcie_resume_dev(struct pci_dev *dev)
{
    int err = 0;
    dhdpcie_info_t *pch = pci_get_drvdata(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
    /* Hand the suspend-time snapshot back to the PCI core (frees
     * pch->state) so pci_restore_state() below uses it
     */
    pci_load_and_free_saved_state(dev, &pch->state);
#endif
    DHD_ERROR(("%s: Enter\n", __FUNCTION__));
    pci_restore_state(dev);
#ifdef FORCE_TPOWERON
    /* Some chips need the T_PowerOn timing re-scaled after resume */
    if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
        dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
    }
#endif /* FORCE_TPOWERON */
    err = pci_enable_device(dev);
    if (err) {
        printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
        goto out;
    }
    pci_set_master(dev);
    err = pci_set_power_state(dev, PCI_D0);
    if (err) {
        printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
        goto out;
    }
    /* No-op use of pch; keeps it "referenced" in configs where the
     * conditional code above is compiled out
     */
    BCM_REFERENCE(pch);
    dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
out:
    return err;
}
928 
/*
 * Resume the host-side PCIe root complex via the platform-specific hook
 * (Exynos / MSM / Tegra, selected at build time). On failure the bus is
 * marked link-down so later register accesses bail out early.
 *
 * Returns 0 on success or the platform's negative error code.
 */
static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
{
    int bcmerror = 0;
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
    bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM);
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_MSM
    bcmerror = dhdpcie_start_host_pcieclock(bus);
#endif /* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_TEGRA
    bcmerror = tegra_pcie_pm_resume();
#endif /* CONFIG_ARCH_TEGRA */
    if (bcmerror < 0) {
        DHD_ERROR(
            ("%s: PCIe RC resume failed!!! (%d)\n", __FUNCTION__, bcmerror));
        /* RC did not come back: flag the link so the hang path triggers */
        bus->is_linkdown = 1;
    }

    return bcmerror;
}
949 
/*
 * Suspend the host-side PCIe root complex via the platform-specific hook
 * (Exynos / MSM / Tegra, selected at build time). On Exynos, the RC
 * config space is saved first so it can be restored on resume.
 *
 * Returns 0 on success or the platform's error code.
 */
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
{
    int bcmerror = 0;
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
    if (bus->rc_dev) {
        pci_save_state(bus->rc_dev);
    } else {
        DHD_ERROR(("%s: RC %x:%x handle is NULL\n", __FUNCTION__,
                   PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
    }
    exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_MSM
    bcmerror = dhdpcie_stop_host_pcieclock(bus);
#endif /* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_TEGRA
    bcmerror = tegra_pcie_pm_suspend();
#endif /* CONFIG_ARCH_TEGRA */
    return bcmerror;
}
970 
971 /**
972  * dhdpcie_os_setbar1win
973  *
974  * Interface function for setting bar1 window in order to allow
975  * os layer to be aware of current window positon.
976  *
977  * @bus: dhd bus context
978  * @addr: new backplane windows address for BAR1
979  */
dhdpcie_os_setbar1win(dhd_bus_t * bus,uint32 addr)980 void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr)
981 {
982     dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
983 
984     osl_pci_write_config(bus->osh, PCI_BAR1_WIN, 0x4, addr);
985     pch->curr_bar1_win = addr;
986 }
987 
988 /**
989  * dhdpcie_os_chkbpoffset
990  *
991  * Check the provided address is within the current BAR1 window,
992  * if not, shift the window
993  *
994  * @bus: dhd bus context
995  * @offset: back plane address that the caller wants to access
996  *
997  * Return: new offset for access
998  */
dhdpcie_os_chkbpoffset(dhdpcie_info_t * pch,ulong offset)999 static ulong dhdpcie_os_chkbpoffset(dhdpcie_info_t *pch, ulong offset)
1000 {
1001     /* Determine BAR1 backplane window using window size
1002      * Window address mask should be ~(size - 1)
1003      */
1004     uint32 bpwin = (uint32)(offset & ~(pch->bar1_size - 1));
1005 
1006     if (bpwin != pch->curr_bar1_win) {
1007         /* Move BAR1 window */
1008         dhdpcie_os_setbar1win(pch->bus, bpwin);
1009     }
1010 
1011     return offset - bpwin;
1012 }
1013 
1014 /**
1015  * dhdpcie os layer tcm read/write interface
1016  */
/* Write one byte to dongle TCM at the given backplane offset */
void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
    dhdpcie_info_t *info = pci_get_drvdata(bus->dev);
    ulong win_off = dhdpcie_os_chkbpoffset(info, offset);

    W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + win_off), data);
}
1024 
dhdpcie_os_rtcm8(dhd_bus_t * bus,ulong offset)1025 uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset)
1026 {
1027     volatile uint8 data;
1028     dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1029 
1030     offset = dhdpcie_os_chkbpoffset(pch, offset);
1031     data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
1032     return data;
1033 }
1034 
/* Write a 16-bit value to dongle TCM at the given backplane offset */
void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
    dhdpcie_info_t *info = pci_get_drvdata(bus->dev);
    ulong win_off = dhdpcie_os_chkbpoffset(info, offset);

    W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + win_off), data);
}
1042 
dhdpcie_os_rtcm16(dhd_bus_t * bus,ulong offset)1043 uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset)
1044 {
1045     volatile uint16 data;
1046     dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1047 
1048     offset = dhdpcie_os_chkbpoffset(pch, offset);
1049     data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
1050     return data;
1051 }
1052 
/* Write a 32-bit value to dongle TCM at the given backplane offset */
void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
    dhdpcie_info_t *info = pci_get_drvdata(bus->dev);
    ulong win_off = dhdpcie_os_chkbpoffset(info, offset);

    W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + win_off), data);
}
1060 
dhdpcie_os_rtcm32(dhd_bus_t * bus,ulong offset)1061 uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset)
1062 {
1063     volatile uint32 data;
1064     dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1065 
1066     offset = dhdpcie_os_chkbpoffset(pch, offset);
1067     data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
1068     return data;
1069 }
1070 
1071 #ifdef DHD_SUPPORT_64BIT
/* Write a 64-bit value to dongle TCM at the given backplane offset */
void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
    dhdpcie_info_t *info = pci_get_drvdata(bus->dev);
    ulong win_off = dhdpcie_os_chkbpoffset(info, offset);

    W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + win_off), data);
}
1079 
dhdpcie_os_rtcm64(dhd_bus_t * bus,ulong offset)1080 uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset)
1081 {
1082     volatile uint64 data;
1083     dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1084 
1085     offset = dhdpcie_os_chkbpoffset(pch, offset);
1086     data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
1087     return data;
1088 }
1089 #endif /* DHD_SUPPORT_64BIT */
1090 
/*
 * Read a dword from the root complex config space at @offset.
 * Returns the value read, or 0xffffffff if there is no RC handle.
 * Always logs the result (intentionally noisy; used for debug).
 */
uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
{
    uint val = -1; /* Initialise to 0xffffffff (all-ones error value) */
    if (bus->rc_dev) {
        pci_read_config_dword(bus->rc_dev, offset, &val);
        /* brief settle delay (100 us) after the config read */
        OSL_DELAY(0x64);
    } else {
        DHD_ERROR(("%s: RC %x:%x handle is NULL\n", __FUNCTION__,
                   PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
    }
    DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n", __FUNCTION__,
               PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
    return (val);
}
1105 
1106 /*
1107  * Reads/ Writes the value of capability register
1108  * from the given CAP_ID section of PCI Root Port
1109  *
1110  * Arguements
1111  * @bus current dhd_bus_t pointer
1112  * @cap Capability or Extended Capability ID to get
1113  * @offset offset of Register to Read
1114  * @is_ext TRUE if @cap is given for Extended Capability
1115  * @is_write is set to TRUE to indicate write
1116  * @val value to write
1117  *
1118  * Return Value
1119  * Returns 0xffffffff on error
1120  * on write success returns BCME_OK (0)
1121  * on Read Success returns the value of register requested
1122  * Note: caller shoud ensure valid capability ID and Ext. Capability ID.
1123  */
1124 
dhdpcie_access_cap(struct pci_dev * pdev,int cap,uint offset,bool is_ext,bool is_write,uint32 writeval)1125 uint32 dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset,
1126                           bool is_ext, bool is_write, uint32 writeval)
1127 {
1128     int cap_ptr = 0;
1129     uint32 ret = -1;
1130     uint32 readval;
1131 
1132     if (!(pdev)) {
1133         DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
1134         return ret;
1135     }
1136 
1137     /* Find Capability offset */
1138     if (is_ext) {
1139         /* removing max EXT_CAP_ID check as
1140          * linux kernel definition's max value is not upadted yet as per spec
1141          */
1142         cap_ptr = pci_find_ext_capability(pdev, cap);
1143     } else {
1144         /* removing max PCI_CAP_ID_MAX check as
1145          * pervious kernel versions dont have this definition
1146          */
1147         cap_ptr = pci_find_capability(pdev, cap);
1148     }
1149 
1150     /* Return if capability with given ID not found */
1151     if (cap_ptr == 0) {
1152         DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n", __FUNCTION__, cap));
1153         return BCME_ERROR;
1154     }
1155 
1156     if (is_write) {
1157         pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
1158         ret = BCME_OK;
1159     } else {
1160         pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
1161         ret = readval;
1162     }
1163 
1164     return ret;
1165 }
1166 
/* Root-complex wrapper around dhdpcie_access_cap(); fails with
 * BCME_ERROR when no RC handle is available.
 */
uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
                             bool is_write, uint32 writeval)
{
    struct pci_dev *rc = bus->rc_dev;

    if (rc == NULL) {
        DHD_ERROR(("%s: RC %x:%x handle is NULL\n", __FUNCTION__,
                   PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
        return BCME_ERROR;
    }

    return dhdpcie_access_cap(rc, cap, offset, is_ext, is_write, writeval);
}
1179 
/* Endpoint wrapper around dhdpcie_access_cap(); fails with BCME_ERROR
 * when no EP handle is available.
 */
uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
                             bool is_write, uint32 writeval)
{
    struct pci_dev *ep = bus->dev;

    if (ep == NULL) {
        DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    return dhdpcie_access_cap(ep, cap, offset, is_ext, is_write, writeval);
}
1191 
1192 /* API wrapper to read Root Port link capability
1193  * Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found
1194  */
1195 
dhd_debug_get_rc_linkcap(dhd_bus_t * bus)1196 uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
1197 {
1198     uint32 linkcap = -1;
1199     linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
1200                                     PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
1201     linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
1202     return linkcap;
1203 }
1204 
/*
 * Save (state==TRUE, entering sleep) or restore (state==FALSE, waking)
 * the BAR-coherent-access-enable bit of PCIE_CFG_SUBSYSTEM_CONTROL
 * around suspend/resume. Only applies to chips with an ARM CA7 core.
 */
static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
{
    if (bus->coreid == ARMCA7_CORE_ID) {
        if (state) {
            /* Sleep: stash just the coherent-access-enable bit */
            bus->coherent_state =
                dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4) &
                PCIE_BARCOHERENTACCEN_MASK;
        } else {
            /* Wake: merge the saved bit back into the current register */
            uint32 val = (dhdpcie_bus_cfg_read_dword(
                              bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4) &
                          ~PCIE_BARCOHERENTACCEN_MASK) |
                         bus->coherent_state;
            dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4,
                                        val);
        }
    }
}
1223 
/*
 * Top-level suspend/resume for the endpoint device plus the host RC.
 * state==TRUE: suspend (EP first, then host RC).
 * state==FALSE: resume (host RC first, then EP), re-applying CTO and
 * enumeration-reset workarounds, since EP config space is reset at
 * D3/PERST. Sends a hang message if the RC link failed to come back.
 *
 * Returns 0 on success or the error from the failing suspend/resume step.
 */
int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
    int rc;

    struct pci_dev *dev = bus->dev;

    if (state) {
        dhdpcie_config_save_restore_coherent(bus, state);
#if !defined(BCMPCIE_OOB_HOST_WAKE)
        /* No out-of-band wake line: rely on PME for wakeup */
        dhdpcie_pme_active(bus->osh, state);
#endif // endif
        rc = dhdpcie_suspend_dev(dev);
        if (!rc) {
            dhdpcie_suspend_host_dev(bus);
        }
    } else {
        rc = dhdpcie_resume_host_dev(bus);
        if (!rc) {
            rc = dhdpcie_resume_dev(dev);
            if (PCIECTO_ENAB(bus)) {
                /* reinit CTO configuration
                 * because cfg space got reset at D3 (PERST)
                 */
                dhdpcie_cto_cfg_init(bus, TRUE);
            }
            if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
                dhdpcie_ssreset_dis_enum_rst(bus);
            }
#if !defined(BCMPCIE_OOB_HOST_WAKE)
            dhdpcie_pme_active(bus->osh, state);
#endif // endif
        }
        dhdpcie_config_save_restore_coherent(bus, state);
        if (bus->is_linkdown) {
            /* RC link never came back: hand off to recovery */
            bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
            dhd_os_send_hang_message(bus->dhd);
        }
    }
    return rc;
}
1264 
/*
 * bus_for_each_dev() callback: count Broadcom PCI devices via *data and
 * warn if one is already bound to a driver other than ours.
 * Always returns 0 so iteration continues over the remaining devices.
 */
static int dhdpcie_device_scan(struct device *dev, void *data)
{
    struct pci_dev *pcidev;
    int *cnt = data;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
    pcidev = container_of(dev, struct pci_dev, dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
    /* 0x14e4 is the Broadcom PCI vendor ID */
    if (pcidev->vendor != 0x14e4) {
        return 0;
    }

    DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
    *cnt += 1;
    if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name)) {
        DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n",
                   pcidev->device, pcidev->driver->name));
    }

    return 0;
}
1291 
/*
 * Register the PCIe driver with the Linux PCI core and verify that at
 * least one Broadcom device was enumerated and initialized.
 * NOTE: once pci_register_driver() returns 0, 'error' is reused as the
 * Broadcom-device count filled in by dhdpcie_device_scan().
 *
 * Returns 0 on success; BCME_ERROR (after unregistering) or the
 * pci_register_driver() error code on failure.
 */
int dhdpcie_bus_register(void)
{
    int error = 0;

    if (!(error = pci_register_driver(&dhdpcie_driver))) {
        bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error,
                         dhdpcie_device_scan);
        if (!error) {
            /* scan counted zero Broadcom devices */
            DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
#ifdef DHD_PRELOAD
            return 0;
#endif
        } else if (!dhdpcie_init_succeeded) {
            DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
        } else {
            return 0;
        }

        /* roll back the registration on failure */
        pci_unregister_driver(&dhdpcie_driver);
        error = BCME_ERROR;
    }

    return error;
}
1316 
/* Unregister the DHD PCIe driver from the Linux PCI core */
void dhdpcie_bus_unregister(void)
{
    pci_unregister_driver(&dhdpcie_driver);
}
1321 
/*
 * PCI probe callback: validate the chip, run the driver's enumeration/
 * initialization (dhdpcie_init), and apply platform runtime-PM fixups.
 * Only one BRCM device attach is supported at a time.
 *
 * Returns 0 on success, -ENODEV on chip mismatch / already attached /
 * init failure.
 */
int __devinit dhdpcie_pci_probe(struct pci_dev *pdev,
                                const struct pci_device_id *ent)
{
    int err = 0;
    DHD_MUTEX_LOCK();

    if (dhdpcie_chipmatch(pdev->vendor, pdev->device)) {
        DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
        err = -ENODEV;
        goto exit;
    }

    printf("PCI_PROBE:  bus %X, slot %X,vendor %X, device %X"
           "(good PCI location)\n",
           pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
           pdev->device);

    /* single-instance driver: refuse a second BRCM device */
    if (dhdpcie_init_succeeded == TRUE) {
        DHD_ERROR(
            ("%s(): === Driver Already attached to a BRCM device === \r\n",
             __FUNCTION__));
        err = -ENODEV;
        goto exit;
    }

    if (dhdpcie_init(pdev)) {
        DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
        err = -ENODEV;
        goto exit;
    }

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
    /*
    Since MSM PCIe RC dev usage conunt already incremented +2 even
    before dhdpcie_pci_probe() called, then we inevitably to call
    pm_runtime_put_noidle() two times to make the count start with zero.
    */

    pm_runtime_put_noidle(&pdev->dev);
    pm_runtime_put_noidle(&pdev->dev);
    pm_runtime_set_suspended(&pdev->dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
    /* disable async suspend */
    device_disable_async_suspend(&pdev->dev);
#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */

    DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
exit:
    DHD_MUTEX_UNLOCK();
    return err;
}
1375 
dhdpcie_detach(dhdpcie_info_t * pch)1376 int dhdpcie_detach(dhdpcie_info_t *pch)
1377 {
1378     if (pch) {
1379 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1380         if (!dhd_download_fw_on_driverload) {
1381             pci_load_and_free_saved_state(pch->dev, &pch->default_state);
1382         }
1383 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1384         MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
1385     }
1386     return 0;
1387 }
1388 
/*
 * PCI remove callback: tear down the bus, disable the device, and free
 * all per-device state (OOB wake info, SMMU context, pcie info, osl).
 * NOTE(review): pch from pci_get_drvdata() is dereferenced without a
 * NULL check — assumes drvdata was always set at probe; confirm.
 */
void __devexit dhdpcie_pci_remove(struct pci_dev *pdev)
{
    osl_t *osh = NULL;
    dhdpcie_info_t *pch = NULL;
    dhd_bus_t *bus = NULL;

    DHD_TRACE(("%s Enter\n", __FUNCTION__));

    DHD_MUTEX_LOCK();

    pch = pci_get_drvdata(pdev);
    bus = pch->bus;
    osh = pch->osh;

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
    /* Undo the two put_noidle() calls made in probe */
    pm_runtime_get_noresume(&pdev->dev);
    pm_runtime_get_noresume(&pdev->dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

    if (bus) {

        /* drop the RC handle before releasing the bus */
        bus->rc_dev = NULL;

        dhdpcie_bus_release(bus);
    }

    if (pci_is_enabled(pdev)) {
        pci_disable_device(pdev);
    }
#ifdef BCMPCIE_OOB_HOST_WAKE
    /* pcie os info detach */
    MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef USE_SMMU_ARCH_MSM
    /* smmu info detach */
    dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
    MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
#endif /* USE_SMMU_ARCH_MSM */
    /* pcie info detach */
    dhdpcie_detach(pch);
    /* osl detach */
    osl_detach(osh);

#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) &&                 \
    defined(CONFIG_ARCH_APQ8084)
    brcm_pcie_wake.wake_irq = NULL;
    brcm_pcie_wake.data = NULL;
#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */

    /* allow a future probe to attach again */
    dhdpcie_init_succeeded = FALSE;

    DHD_MUTEX_UNLOCK();

    DHD_TRACE(("%s Exit\n", __FUNCTION__));

    return;
}
1446 
/* Enable Linux MSI: allocate between min_vecs and max_vecs MSI vectors
 * using whichever API the running kernel provides. Returns the kernel
 * API's result (vector count on success, negative errno on failure).
 */
int dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs,
                       unsigned int max_vecs)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
    return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
    return pci_enable_msi_range(pdev, min_vecs, max_vecs);
#else
    /* legacy API has no min_vecs; request max_vecs only */
    return pci_enable_msi_block(pdev, max_vecs);
#endif // endif
}
1459 
1460 /* Disable Linux Msi */
dhdpcie_disable_msi(struct pci_dev * pdev)1461 void dhdpcie_disable_msi(struct pci_dev *pdev)
1462 {
1463 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1464     pci_free_irq_vectors(pdev);
1465 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
1466     pci_disable_msi(pdev);
1467 #else
1468     pci_disable_msi(pdev);
1469 #endif // endif
1470     return;
1471 }
1472 
/* Request Linux irq */
/*
 * Register the bus interrupt handler: try to enable a single MSI vector
 * when d2h_intr_method is PCIE_MSI (falling back to legacy INTx on
 * failure), then request_irq() dhdpcie_isr as a shared handler. Also
 * re-enables the host IRQ if it was left disabled.
 *
 * Returns 0 on success, -1 if request_irq() fails.
 */
int dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
{
    dhd_bus_t *bus = dhdpcie_info->bus;
    struct pci_dev *pdev = dhdpcie_info->bus->dev;
    int host_irq_disabled;

    if (!bus->irq_registered) {
        snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
                 "dhdpcie:%s", pci_name(pdev));

        if (bus->d2h_intr_method == PCIE_MSI) {
            /* one MSI vector; on failure fall back to INTx */
            if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
                DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
                dhdpcie_disable_msi(pdev);
                bus->d2h_intr_method = PCIE_INTX;
            }
        }

        if (bus->d2h_intr_method == PCIE_MSI) {
            printf("%s: MSI enabled\n", __FUNCTION__);
        } else {
            printf("%s: INTx enabled\n", __FUNCTION__);
        }

        if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
                        dhdpcie_info->pciname, bus) < 0) {
            DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
            /* undo MSI allocation if we had switched to it */
            if (bus->d2h_intr_method == PCIE_MSI) {
                dhdpcie_disable_msi(pdev);
            }
            return -1;
        } else {
            bus->irq_registered = TRUE;
        }
    } else {
        DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
    }

    host_irq_disabled = dhdpcie_irq_disabled(bus);
    if (host_irq_disabled) {
        DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so, enabled it again\n",
                   __FUNCTION__, host_irq_disabled));
        dhdpcie_enable_irq(bus);
    }

    DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));

    return 0; /* SUCCESS */
}
1523 
1524 /**
1525  *	dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
1526  */
dhdpcie_get_pcieirq(struct dhd_bus * bus,unsigned int * irq)1527 int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
1528 {
1529     struct pci_dev *pdev = bus->dev;
1530 
1531     if (!pdev) {
1532         DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
1533         return -ENODEV;
1534     }
1535 
1536     *irq = pdev->irq;
1537 
1538     return 0; /* SUCCESS */
1539 }
1540 
1541 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1542 #define PRINTF_RESOURCE "0x%016llx"
1543 #else
1544 #define PRINTF_RESOURCE "0x%08x"
1545 #endif // endif
1546 
1547 #ifdef EXYNOS_PCIE_MODULE_PATCH
1548 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1549 extern struct pci_saved_state *bcm_pcie_default_state;
1550 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1551 #endif /* EXYNOS_MODULE_PATCH */
1552 
/*

Name:  dhdpcie_get_resource

Parameters

1: dhdpcie_info_t *dhdpcie_info -- per-device info structure holding the
pci_dev pointer and receiving the mapped BAR resources

Return value

int - status (0 on success, -1 on failure)

Description:
Accesses PCI configuration space, retrieves the PCI-allocated BAR
resources, and records them in the resource structure.

 */
dhdpcie_get_resource(dhdpcie_info_t * dhdpcie_info)1572 int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
1573 {
1574     phys_addr_t bar0_addr, bar1_addr;
1575     ulong bar1_size;
1576     struct pci_dev *pdev = NULL;
1577     pdev = dhdpcie_info->dev;
1578 #ifdef EXYNOS_PCIE_MODULE_PATCH
1579 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1580     if (bcm_pcie_default_state) {
1581         pci_load_saved_state(pdev, bcm_pcie_default_state);
1582         pci_restore_state(pdev);
1583     }
1584 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1585 #endif /* EXYNOS_MODULE_PATCH */
1586     do {
1587         if (pci_enable_device(pdev)) {
1588             printf("%s: Cannot enable PCI device\n", __FUNCTION__);
1589             break;
1590         }
1591         pci_set_master(pdev);
1592         bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */
1593         bar1_addr = pci_resource_start(pdev, 0x2); /* Bar-1 mapped address */
1594 
1595         /* read Bar-1 mapped memory range */
1596         bar1_size = pci_resource_len(pdev, 0x2);
1597         if ((bar1_size == 0) || (bar1_addr == 0)) {
1598             printf("%s: BAR1 Not enabled for this device  size(%ld),"
1599                    " addr(0x" PRINTF_RESOURCE ")\n",
1600                    __FUNCTION__, bar1_size, bar1_addr);
1601             goto err;
1602         }
1603 
1604         dhdpcie_info->regs =
1605             (volatile char *)REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
1606         dhdpcie_info->bar1_size =
1607             (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
1608         dhdpcie_info->tcm =
1609             (volatile char *)REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
1610 
1611         if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
1612             DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
1613             break;
1614         }
1615 #ifdef EXYNOS_PCIE_MODULE_PATCH
1616 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1617         if (bcm_pcie_default_state == NULL) {
1618             pci_save_state(pdev);
1619             bcm_pcie_default_state = pci_store_saved_state(pdev);
1620         }
1621 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1622 #endif /* EXYNOS_MODULE_PATCH */
1623 
1624 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1625         /* Backup PCIe configuration so as to use Wi-Fi on/off process
1626          * in case of built in driver
1627          */
1628         pci_save_state(pdev);
1629         dhdpcie_info->default_state = pci_store_saved_state(pdev);
1630 
1631         if (dhdpcie_info->default_state == NULL) {
1632             DHD_ERROR(
1633                 ("%s pci_store_saved_state returns NULL\n", __FUNCTION__));
1634             REG_UNMAP(dhdpcie_info->regs);
1635             REG_UNMAP(dhdpcie_info->tcm);
1636             pci_disable_device(pdev);
1637             break;
1638         }
1639 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1640 
1641         DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x" PRINTF_RESOURCE
1642                    " \n",
1643                    __FUNCTION__, dhdpcie_info->regs, bar0_addr));
1644         DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x" PRINTF_RESOURCE
1645                    " \n",
1646                    __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
1647 
1648         return 0; /* SUCCESS  */
1649     } while (0);
1650 err:
1651     return -1; /* FAILURE */
1652 }
1653 
/* Thin wrapper over dhdpcie_get_resource() with entry/exit tracing.
 * Returns 0 on success, -1 on failure.
 */
int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
{
    DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

    if (dhdpcie_get_resource(dhdpcie_info) == 0) {
        DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
        return 0; /* SUCCESS */
    }

    DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
    DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));

    return -1; /* FAILURE */
}
1673 
/* Log the virtual/physical addresses and sizes of BAR0 and BAR1.
 * Bails out quietly (with an error log) on any missing handle.
 */
void dhdpcie_dump_resource(dhd_bus_t *bus)
{
    dhdpcie_info_t *info;

    if (bus == NULL) {
        DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
        return;
    }
    if (bus->dev == NULL) {
        DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
        return;
    }
    info = pci_get_drvdata(bus->dev);
    if (info == NULL) {
        DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
        return;
    }

    /* BAR0 */
    DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): " PRINTF_RESOURCE ", SIZE: %d\n",
               __FUNCTION__, info->regs, pci_resource_start(bus->dev, 0),
               DONGLE_REG_MAP_SIZE));
    /* BAR1 */
    DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): " PRINTF_RESOURCE ", SIZE: %d\n",
               __FUNCTION__, info->tcm, pci_resource_start(bus->dev, 0x2),
               info->bar1_size));
}
1704 
dhdpcie_init(struct pci_dev * pdev)1705 int dhdpcie_init(struct pci_dev *pdev)
1706 {
1707     osl_t *osh = NULL;
1708     dhd_bus_t *bus = NULL;
1709     dhdpcie_info_t *dhdpcie_info = NULL;
1710     wifi_adapter_info_t *adapter = NULL;
1711 #ifdef BCMPCIE_OOB_HOST_WAKE
1712     dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
1713 #endif /* BCMPCIE_OOB_HOST_WAKE */
1714 #ifdef USE_SMMU_ARCH_MSM
1715     dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
1716 #endif /* USE_SMMU_ARCH_MSM */
1717     int ret = 0;
1718 
1719     do {
1720         /* osl attach */
1721         if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
1722             DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
1723             break;
1724         }
1725 
1726         /* initialize static buffer */
1727         adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
1728                                                 PCI_SLOT(pdev->devfn));
1729         if (adapter != NULL) {
1730             DHD_ERROR(
1731                 ("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
1732             adapter->bus_type = PCI_BUS;
1733             adapter->bus_num = pdev->bus->number;
1734             adapter->slot_num = PCI_SLOT(pdev->devfn);
1735             adapter->pci_dev = pdev;
1736         } else {
1737             DHD_ERROR(
1738                 ("%s: can't find adapter info for this chip\n", __FUNCTION__));
1739         }
1740         osl_static_mem_init(osh, adapter);
1741 
1742         /* Set ACP coherence flag */
1743         if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT()) {
1744             osl_flag_set(osh, OSL_ACP_COHERENCE);
1745         }
1746 
1747         /*  allocate linux spcific pcie structure here */
1748         if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
1749             DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
1750             break;
1751         }
1752         bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
1753         dhdpcie_info->osh = osh;
1754         dhdpcie_info->dev = pdev;
1755 
1756 #ifdef BCMPCIE_OOB_HOST_WAKE
1757         /* allocate OS speicific structure */
1758         dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
1759         if (dhdpcie_osinfo == NULL) {
1760             DHD_ERROR(
1761                 ("%s: MALLOC of dhdpcie_os_info_t failed\n", __FUNCTION__));
1762             break;
1763         }
1764         bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1765         dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
1766 
1767         /* Initialize host wake IRQ */
1768         spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
1769         /* Get customer specific host wake IRQ parametres: IRQ number as IRQ
1770          * type */
1771         dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(
1772             adapter, &dhdpcie_osinfo->oob_irq_flags);
1773         if (dhdpcie_osinfo->oob_irq_num < 0) {
1774             DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
1775         }
1776 #endif /* BCMPCIE_OOB_HOST_WAKE */
1777 
1778 #ifdef USE_SMMU_ARCH_MSM
1779         /* allocate private structure for using SMMU */
1780         dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
1781         if (dhdpcie_smmu_info == NULL) {
1782             DHD_ERROR(
1783                 ("%s: MALLOC of dhdpcie_smmu_info_t failed\n", __FUNCTION__));
1784             break;
1785         }
1786         bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1787         dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
1788 
1789         /* Initialize smmu structure */
1790         if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
1791             DHD_ERROR(("%s: Failed to initialize SMMU\n", __FUNCTION__));
1792             break;
1793         }
1794 #endif /* USE_SMMU_ARCH_MSM */
1795 
1796 #ifdef DHD_WAKE_STATUS
1797         /* Initialize pcie_lock */
1798         spin_lock_init(&dhdpcie_info->pcie_lock);
1799 #endif /* DHD_WAKE_STATUS */
1800 
1801         /* Find the PCI resources, verify the  */
1802         /* vendor and device ID, map BAR regions and irq,  update in structures
1803          */
1804         if (dhdpcie_scan_resource(dhdpcie_info)) {
1805             DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__));
1806 
1807             break;
1808         }
1809 
1810         /* Bus initialization */
1811         ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs,
1812                                  dhdpcie_info->tcm, pdev, adapter);
1813         if (ret != BCME_OK) {
1814             DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
1815             break;
1816         }
1817 
1818         dhdpcie_info->bus = bus;
1819         bus->is_linkdown = 0;
1820         bus->no_bus_init = FALSE;
1821         bus->cto_triggered = 0;
1822 
1823         bus->rc_dev = NULL;
1824 
1825         /* Get RC Device Handle */
1826         if (bus->dev->bus) {
1827             /* self member of structure pci_bus is bridge device as seen by
1828              * parent */
1829             bus->rc_dev = bus->dev->bus->self;
1830             if (bus->rc_dev) {
1831                 DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n",
1832                            __FUNCTION__, bus->rc_dev->vendor,
1833                            bus->rc_dev->device, bus->rc_dev));
1834             } else {
1835                 DHD_ERROR(("%s: bus->dev->bus->self is NULL\n", __FUNCTION__));
1836             }
1837         } else {
1838             DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n",
1839                        __FUNCTION__));
1840         }
1841 
1842         /* if rc_dev is still NULL, try to get from vendor/device IDs */
1843         if (bus->rc_dev == NULL) {
1844             bus->rc_dev =
1845                 pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
1846             DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n",
1847                        __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID,
1848                        bus->rc_dev));
1849         }
1850 
1851         bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus);
1852         bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus);
1853         DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n", __FUNCTION__,
1854                    bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
1855 
1856 #ifdef FORCE_TPOWERON
1857         if (dhdpcie_chip_req_forced_tpoweron(bus)) {
1858             dhd_bus_set_tpoweron(bus, tpoweron_scale);
1859         }
1860 #endif /* FORCE_TPOWERON */
1861 
1862 #if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) &&                 \
1863     defined(CONFIG_ARCH_APQ8084)
1864         brcm_pcie_wake.wake_irq = wlan_oob_irq;
1865         brcm_pcie_wake.data = bus;
1866 #endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */
1867 
1868 #ifdef DONGLE_ENABLE_ISOLATION
1869         bus->dhd->dongle_isolation = TRUE;
1870 #endif /* DONGLE_ENABLE_ISOLATION */
1871 
1872         if (bus->intr) {
1873             /* Register interrupt callback, but mask it (not operational yet).
1874              */
1875             DHD_INTR(
1876                 ("%s: Registering and masking interrupts\n", __FUNCTION__));
1877             dhdpcie_bus_intr_disable(bus);
1878 
1879             if (dhdpcie_request_irq(dhdpcie_info)) {
1880                 DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
1881                 break;
1882             }
1883         } else {
1884             bus->pollrate = 1;
1885             DHD_INFO(("%s: PCIe interrupt function is NOT registered "
1886                       "due to polling mode\n",
1887                       __FUNCTION__));
1888         }
1889 
1890 #if defined(BCM_REQUEST_FW)
1891         if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
1892             DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
1893         }
1894         bus->nv_path = NULL;
1895         bus->fw_path = NULL;
1896 #endif /* BCM_REQUEST_FW */
1897 
1898         /* set private data for pci_dev */
1899         pci_set_drvdata(pdev, dhdpcie_info);
1900 
1901 #if defined(BCMDHD_MODULAR) && defined(INSMOD_FW_LOAD)
1902         if (1)
1903 #else
1904         if (dhd_download_fw_on_driverload)
1905 #endif
1906         {
1907             if (dhd_bus_start(bus->dhd)) {
1908                 DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__));
1909                 if (!allow_delay_fwdl) {
1910                     break;
1911                 }
1912             }
1913         } else {
1914             /* Set ramdom MAC address during boot time */
1915             get_random_bytes(&bus->dhd->mac.octet[0x3], 0x3);
1916             /* Adding BRCM OUI */
1917             bus->dhd->mac.octet[0] = 0;
1918             bus->dhd->mac.octet[1] = 0x90;
1919             bus->dhd->mac.octet[0x2] = 0x4C;
1920         }
1921 
1922         /* Attach to the OS network interface */
1923         DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
1924         if (dhd_attach_net(bus->dhd, TRUE)) {
1925             DHD_ERROR(
1926                 ("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
1927             break;
1928         }
1929 
1930         dhdpcie_init_succeeded = TRUE;
1931 
1932 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1933         pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
1934         pm_runtime_use_autosuspend(&pdev->dev);
1935         atomic_set(&bus->dhd->block_bus, FALSE);
1936 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1937 
1938 #if defined(MULTIPLE_SUPPLICANT)
1939         wl_android_post_init(); // terence 20120530: fix critical section in
1940                                 // dhd_open and dhdsdio_probe
1941 #endif                          /* MULTIPLE_SUPPLICANT */
1942 
1943         DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
1944         return 0; /* return  SUCCESS  */
1945     } while (0);
1946     /* reverse the initialization in order in case of error */
1947     if (bus) {
1948         dhdpcie_bus_release(bus);
1949     }
1950 
1951 #ifdef BCMPCIE_OOB_HOST_WAKE
1952     if (dhdpcie_osinfo) {
1953         MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1954     }
1955 #endif /* BCMPCIE_OOB_HOST_WAKE */
1956 
1957 #ifdef USE_SMMU_ARCH_MSM
1958     if (dhdpcie_smmu_info) {
1959         MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1960         dhdpcie_info->smmu_cxt = NULL;
1961     }
1962 #endif /* USE_SMMU_ARCH_MSM */
1963 
1964     if (dhdpcie_info) {
1965         dhdpcie_detach(dhdpcie_info);
1966     }
1967     pci_disable_device(pdev);
1968     if (osh) {
1969         osl_detach(osh);
1970     }
1971     if (adapter != NULL) {
1972         adapter->bus_type = -1;
1973         adapter->bus_num = -1;
1974         adapter->slot_num = -1;
1975     }
1976 
1977     dhdpcie_init_succeeded = FALSE;
1978 
1979     DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1980 
1981     return -1; /* return FAILURE  */
1982 }
1983 
1984 /* Free Linux irq */
/* Release the Linux IRQ previously registered for this bus, if any.
 * Also tears down MSI when that was the delivery method in use.
 */
void dhdpcie_free_irq(dhd_bus_t *bus)
{
    DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
    if (bus != NULL) {
        struct pci_dev *pcidev = bus->dev;

        if (!bus->irq_registered) {
            DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
        } else {
#if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
            /* drop the affinity hint before unregistering the PCIe irq */
            (void)irq_set_affinity_hint(pcidev->irq, NULL);
#endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
            free_irq(pcidev->irq, bus);
            bus->irq_registered = FALSE;
            if (bus->d2h_intr_method == PCIE_MSI) {
                dhdpcie_disable_msi(pcidev);
            }
        }
    }
    DHD_TRACE(("%s: Exit\n", __FUNCTION__));
}
2011 
/*

Name:  dhdpcie_isr

Parameters

1: IN int irq   -- interrupt vector
2: IN void *arg      -- handle to private data structure (dhd_bus_t)

Return value

irqreturn_t (IRQ_HANDLED)

Description:
Interrupt Service routine checks for the status register,
disables interrupts, and queues the DPC if mailbox interrupts are raised.
*/
2029 
dhdpcie_isr(int irq,void * arg)2030 irqreturn_t dhdpcie_isr(int irq, void *arg)
2031 {
2032     dhd_bus_t *bus = (dhd_bus_t *)arg;
2033     bus->isr_entry_time = OSL_LOCALTIME_NS();
2034     if (!dhdpcie_bus_isr(bus)) {
2035         DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
2036     }
2037     bus->isr_exit_time = OSL_LOCALTIME_NS();
2038     return IRQ_HANDLED;
2039 }
2040 
/* Disable the bus data IRQ without waiting for in-flight handlers
 * (safe from atomic context). Returns BCME_OK or BCME_ERROR.
 */
int dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
{
    if ((bus == NULL) || (bus->dev == NULL)) {
        DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    disable_irq_nosync(bus->dev->irq);
    return BCME_OK;
}
2053 
/* Disable the bus data IRQ, waiting for any running handler to finish
 * (must not be called from the handler itself). Returns BCME_OK/BCME_ERROR.
 */
int dhdpcie_disable_irq(dhd_bus_t *bus)
{
    if ((bus == NULL) || (bus->dev == NULL)) {
        DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    disable_irq(bus->dev->irq);
    return BCME_OK;
}
2066 
/* Re-enable the bus data IRQ. Returns BCME_OK, or BCME_ERROR when the
 * bus handle or its pci_dev is missing.
 */
int dhdpcie_enable_irq(dhd_bus_t *bus)
{
    if ((bus == NULL) || (bus->dev == NULL)) {
        DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    enable_irq(bus->dev->irq);
    return BCME_OK;
}
2079 
/* Report whether the bus data IRQ is currently disabled.
 * Returns the irq_desc disable depth (0 == enabled, >0 == disabled) on
 * kernels >= 3.4.0; BCME_ERROR on older kernels where this query is
 * not supported.
 */
int dhdpcie_irq_disabled(dhd_bus_t *bus)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
    struct irq_desc *desc = irq_to_desc(bus->dev->irq);
    /* depth will be zero, if enabled */
    return desc->depth;
#else
    /* return ERROR by default as there is no support for lower versions */
    return BCME_ERROR;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
}
2091 
/* Bring up the host-side PCIe link/clock for this bus. Only MSM platforms
 * actually gate the link (via msm_pcie_pm_control); elsewhere this is a
 * validated no-op.
 *
 * Returns 0 on success, BCME_ERROR for a missing bus/device handle, or
 * the platform PM error code on link bring-up failure.
 *
 * Cleanup vs. original: removed a leftover empty
 * "#ifdef CONFIG_ARCH_MSM / #endif" stanza and a "goto done" that jumped
 * to the immediately following label — behavior is unchanged.
 */
int dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
{
    int ret = 0;

    DHD_TRACE(("%s Enter:\n", __FUNCTION__));

    if ((bus == NULL) || (bus->dev == NULL)) {
        return BCME_ERROR;
    }

#ifdef CONFIG_ARCH_MSM
    ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, bus->dev,
                              NULL, 0);
    if (ret) {
        DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
    }
#endif /* CONFIG_ARCH_MSM */
    DHD_TRACE(("%s Exit:\n", __FUNCTION__));
    return ret;
}
2120 
/* Power down the host-side PCIe link for this bus (MSM platforms only;
 * validated no-op elsewhere).
 *
 * Returns 0 on success, BCME_ERROR for a missing bus/device handle, or
 * the platform PM error code on link suspend failure.
 *
 * Cleanup vs. original: removed a leftover empty
 * "#ifdef CONFIG_ARCH_MSM / #endif" stanza and a "goto done" that jumped
 * to the immediately following label — behavior is unchanged.
 */
int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
{
    int ret = 0;

    DHD_TRACE(("%s Enter:\n", __FUNCTION__));

    if ((bus == NULL) || (bus->dev == NULL)) {
        return BCME_ERROR;
    }

#ifdef CONFIG_ARCH_MSM
    ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, bus->dev,
                              NULL, 0);
    if (ret) {
        DHD_ERROR(("Failed to stop PCIe link\n"));
    }
#endif /* CONFIG_ARCH_MSM */
    DHD_TRACE(("%s Exit:\n", __FUNCTION__));
    return ret;
}
2149 
/* Disable the PCI device if it is currently enabled.
 * Returns 0 on success (including the already-disabled case),
 * BCME_ERROR when the bus or its pci_dev handle is missing.
 */
int dhdpcie_disable_device(dhd_bus_t *bus)
{
    DHD_TRACE(("%s Enter:\n", __FUNCTION__));

    if ((bus == NULL) || (bus->dev == NULL)) {
        return BCME_ERROR;
    }

    if (pci_is_enabled(bus->dev)) {
        pci_disable_device(bus->dev);
    }

    return 0;
}
2168 
/* Re-enable the PCIe endpoint after a link power cycle: reload the config
 * space saved at probe time, sanity-check that the device on the slot is
 * still the same chip (VID compare), then restore state and call
 * pci_enable_device()/pci_set_master().
 *
 * Returns 0 on success, BCME_ERROR on missing handles or VID mismatch
 * (also setting bus->no_bus_init, and bus->is_linkdown when the config
 * read returned all-ones), or the pci_enable_device() error code.
 */
int dhdpcie_enable_device(dhd_bus_t *bus)
{
    int ret = BCME_ERROR;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
    dhdpcie_info_t *pch;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

    DHD_TRACE(("%s Enter:\n", __FUNCTION__));

    if (bus == NULL) {
        return BCME_ERROR;
    }

    if (bus->dev == NULL) {
        return BCME_ERROR;
    }

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
    pch = pci_get_drvdata(bus->dev);
    if (pch == NULL) {
        return BCME_ERROR;
    }

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) &&                        \
    (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) &&                         \
    !defined(CONFIG_SOC_EXYNOS8890)
    /* Updated with pci_load_and_free_saved_state to compatible
     * with Kernel version 3.14.0 to 3.18.41.
     * (That API consumes the saved state, so re-capture it immediately.)
     */
    pci_load_and_free_saved_state(bus->dev, &pch->default_state);
    pch->default_state = pci_store_saved_state(bus->dev);
#else
    pci_load_saved_state(bus->dev, pch->default_state);
#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 &&                 \
          !CONFIG_SOC_EXYNOS8890 */

    /* Check if Device ID is valid */
    if (bus->dev->state_saved) {
        uint32 vid, saved_vid;
        /* compare the live VID with the one captured in the saved config
         * space; a mismatch means a different device (or a dead link)
         */
        pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid);
        saved_vid = bus->dev->saved_config_space[PCI_CFG_VID];
        if (vid != saved_vid) {
            DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
                       "Skip the bus init\n",
                       __FUNCTION__, vid, saved_vid));
            bus->no_bus_init = TRUE;
            /* Check if the PCIe link is down */
            if (vid == (uint32)-1) {
                bus->is_linkdown = 1;
            }
            return BCME_ERROR;
        }
    }

    pci_restore_state(bus->dev);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */

    ret = pci_enable_device(bus->dev);
    if (ret) {
        pci_disable_device(bus->dev);
    } else {
        pci_set_master(bus->dev);
    }

    return ret;
}
2235 
/* Map BAR0 (chip register window) and BAR1 (dongle TCM) for an already
 * probed device and record the mappings in both the drvdata structure and
 * the bus handle (bus->regs / bus->tcm).
 *
 * Returns 0 on success; BCME_ERROR on any failure, with nothing left
 * mapped (the regs mapping is undone if the tcm mapping fails).
 *
 * Fix vs. original: the TCM ioremap failure path logged
 * "ioremap() for regs is failed" (copy-paste of the BAR0 message); it now
 * correctly names tcm.
 */
int dhdpcie_alloc_resource(dhd_bus_t *bus)
{
    dhdpcie_info_t *dhdpcie_info;
    phys_addr_t bar0_addr, bar1_addr;
    ulong bar1_size;

    do {
        if (bus == NULL) {
            DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
            break;
        }

        if (bus->dev == NULL) {
            DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
            break;
        }

        dhdpcie_info = pci_get_drvdata(bus->dev);
        if (dhdpcie_info == NULL) {
            DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
            break;
        }

        bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */
        bar1_addr = pci_resource_start(bus->dev, 0x2); /* Bar-1 mapped address */

        /* read Bar-1 mapped memory range */
        bar1_size = pci_resource_len(bus->dev, 0x2);
        if ((bar1_size == 0) || (bar1_addr == 0)) {
            printf("%s: BAR1 Not enabled for this device size(%ld),"
                   " addr(0x" PRINTF_RESOURCE ")\n",
                   __FUNCTION__, bar1_size, bar1_addr);
            break;
        }
        dhdpcie_info->regs =
            (volatile char *)REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
        if (!dhdpcie_info->regs) {
            DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
            break;
        }

        bus->regs = dhdpcie_info->regs;
        /* map at least the full TCM window even when BAR1 reports smaller */
        dhdpcie_info->bar1_size =
            (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
        dhdpcie_info->tcm =
            (volatile char *)REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
        if (!dhdpcie_info->tcm) {
            DHD_ERROR(("%s: ioremap() for tcm is failed\n", __FUNCTION__));
            /* undo the BAR0 mapping so nothing is left half-initialized */
            REG_UNMAP(dhdpcie_info->regs);
            bus->regs = NULL;
            break;
        }

        bus->tcm = dhdpcie_info->tcm;

        DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x" PRINTF_RESOURCE
                   " \n",
                   __FUNCTION__, dhdpcie_info->regs, bar0_addr));
        DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x" PRINTF_RESOURCE
                   " \n",
                   __FUNCTION__, dhdpcie_info->tcm, bar1_addr));

        return 0;
    } while (0);

    return BCME_ERROR;
}
2303 
/* Unmap the BAR0/BAR1 windows established by dhdpcie_alloc_resource()
 * and clear the corresponding bus handles.
 */
void dhdpcie_free_resource(dhd_bus_t *bus)
{
    dhdpcie_info_t *pcie_info;

    if (bus == NULL) {
        DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
        return;
    }

    if (bus->dev == NULL) {
        DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
        return;
    }

    pcie_info = pci_get_drvdata(bus->dev);
    if (pcie_info == NULL) {
        DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
        return;
    }

    if (bus->regs != NULL) {
        REG_UNMAP(pcie_info->regs);
        bus->regs = NULL;
    }

    if (bus->tcm != NULL) {
        REG_UNMAP(pcie_info->tcm);
        bus->tcm = NULL;
    }
}
2334 
dhdpcie_bus_request_irq(struct dhd_bus * bus)2335 int dhdpcie_bus_request_irq(struct dhd_bus *bus)
2336 {
2337     dhdpcie_info_t *dhdpcie_info;
2338     int ret = 0;
2339 
2340     if (bus == NULL) {
2341         DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2342         return BCME_ERROR;
2343     }
2344 
2345     if (bus->dev == NULL) {
2346         DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2347         return BCME_ERROR;
2348     }
2349 
2350     dhdpcie_info = pci_get_drvdata(bus->dev);
2351     if (dhdpcie_info == NULL) {
2352         DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2353         return BCME_ERROR;
2354     }
2355 
2356     if (bus->intr) {
2357         /* Register interrupt callback, but mask it (not operational yet). */
2358         DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
2359         dhdpcie_bus_intr_disable(bus);
2360         ret = dhdpcie_request_irq(dhdpcie_info);
2361         if (ret) {
2362             DHD_ERROR(
2363                 ("%s: request_irq() failed, ret=%d\n", __FUNCTION__, ret));
2364             return ret;
2365         }
2366     }
2367 
2368     return ret;
2369 }
2370 
2371 #ifdef BCMPCIE_OOB_HOST_WAKE
2372 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2373 extern int dhd_get_wlan_oob_gpio(void);
2374 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2375 
dhdpcie_get_oob_irq_level(void)2376 int dhdpcie_get_oob_irq_level(void)
2377 {
2378     int gpio_level;
2379 
2380 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2381     gpio_level = dhd_get_wlan_oob_gpio();
2382 #else
2383     gpio_level = BCME_UNSUPPORTED;
2384 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2385     return gpio_level;
2386 }
2387 
dhdpcie_get_oob_irq_status(struct dhd_bus * bus)2388 int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
2389 {
2390     dhdpcie_info_t *pch;
2391     dhdpcie_os_info_t *dhdpcie_osinfo;
2392 
2393     if (bus == NULL) {
2394         DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2395         return 0;
2396     }
2397 
2398     if (bus->dev == NULL) {
2399         DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2400         return 0;
2401     }
2402 
2403     pch = pci_get_drvdata(bus->dev);
2404     if (pch == NULL) {
2405         DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2406         return 0;
2407     }
2408 
2409     dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2410 
2411     return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
2412 }
2413 
dhdpcie_get_oob_irq_num(struct dhd_bus * bus)2414 int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
2415 {
2416     dhdpcie_info_t *pch;
2417     dhdpcie_os_info_t *dhdpcie_osinfo;
2418 
2419     if (bus == NULL) {
2420         DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2421         return 0;
2422     }
2423 
2424     if (bus->dev == NULL) {
2425         DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2426         return 0;
2427     }
2428 
2429     pch = pci_get_drvdata(bus->dev);
2430     if (pch == NULL) {
2431         DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2432         return 0;
2433     }
2434 
2435     dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2436 
2437     return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0;
2438 }
2439 
/* Enable or disable the OOB host-wake interrupt under oob_irq_spinlock,
 * keeping the enable/disable counters and timestamps on the bus in sync.
 * A no-op when the requested state already matches or the IRQ number is
 * invalid.
 *
 * Fix vs. original: pch->os_cxt is now NULL-checked before use, matching
 * the sibling dhdpcie_get_oob_irq_* helpers, instead of being
 * dereferenced unconditionally.
 */
void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
{
    unsigned long flags;
    dhdpcie_info_t *pch;
    dhdpcie_os_info_t *dhdpcie_osinfo;

    if (bus == NULL) {
        DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
        return;
    }

    if (bus->dev == NULL) {
        DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
        return;
    }

    pch = pci_get_drvdata(bus->dev);
    if (pch == NULL) {
        DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
        return;
    }

    dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
    if (dhdpcie_osinfo == NULL) {
        DHD_ERROR(("%s: dhdpcie_osinfo is NULL\n", __FUNCTION__));
        return;
    }

    spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
    if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
        (dhdpcie_osinfo->oob_irq_num > 0)) {
        if (enable) {
            enable_irq(dhdpcie_osinfo->oob_irq_num);
            bus->oob_intr_enable_count++;
            bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
        } else {
            /* nosync variant: safe to call from the OOB ISR itself */
            disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
            bus->oob_intr_disable_count++;
            bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
        }
        dhdpcie_osinfo->oob_irq_enabled = enable;
    }
    spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
}
2479 
/* OOB host-wake interrupt handler. Masks the OOB IRQ (it is re-armed
 * later via dhdpcie_oob_intr_set), accounts the wake event, and holds a
 * timed wake lock while the bus is in a low power state so the host
 * stays awake long enough to process the wake-up.
 */
static irqreturn_t wlan_oob_irq(int irq, void *data)
{
    dhd_bus_t *bus;
    unsigned long flags_bus;
    DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
    bus = (dhd_bus_t *)data;
    /* mask the OOB irq until it is explicitly re-enabled */
    dhdpcie_oob_intr_set(bus, FALSE);
    bus->last_oob_irq_time = OSL_LOCALTIME_NS();
    bus->oob_intr_count++;
#ifdef DHD_WAKE_STATUS
    {
        bcmpcie_set_get_wake(bus, 1);
    }
#endif /* DHD_WAKE_STATUS */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
    dhd_bus_wakeup_work(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
    DHD_BUS_LOCK(bus->bus_lock, flags_bus);
    /* Hold wakelock if bus_low_power_state is
     * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED
     */
    if (bus->dhd->up &&
        bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
        DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
    }
    DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
    return IRQ_HANDLED;
}
2508 
/* Register the OOB host-wake interrupt (wlan_oob_irq) and, unless
 * DISABLE_WOWLAN is set, arm it as a system wakeup source.
 *
 * Returns 0 on success (including the no-irq-number case), -EINVAL on
 * missing handles, -EBUSY if already registered, or the request_irq()
 * error code.
 */
int dhdpcie_oob_intr_register(dhd_bus_t *bus)
{
    int err = 0;
    dhdpcie_info_t *pch;
    dhdpcie_os_info_t *dhdpcie_osinfo;

    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
    if (bus == NULL) {
        DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
        return -EINVAL;
    }

    if (bus->dev == NULL) {
        DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
        return -EINVAL;
    }

    pch = pci_get_drvdata(bus->dev);
    if (pch == NULL) {
        DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
        return -EINVAL;
    }

    dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
    if (dhdpcie_osinfo->oob_irq_registered) {
        DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
        return -EBUSY;
    }

    if (dhdpcie_osinfo->oob_irq_num > 0) {
        printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__,
               (int)dhdpcie_osinfo->oob_irq_num,
               (int)dhdpcie_osinfo->oob_irq_flags);
        err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
                          dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
                          bus);
        if (err) {
            DHD_ERROR(("%s: request_irq failed with %d\n", __FUNCTION__, err));
            return err;
        }
#if defined(DISABLE_WOWLAN)
        printf("%s: disable_irq_wake\n", __FUNCTION__);
        dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
#else
        printf("%s: enable_irq_wake\n", __FUNCTION__);
        /* arm the OOB irq as a wakeup source; a failure here is logged
         * but not treated as fatal (registration still succeeds)
         */
        err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
        if (!err) {
            dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
        } else {
            printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err);
        }
#endif
        dhdpcie_osinfo->oob_irq_enabled = TRUE;
    }

    dhdpcie_osinfo->oob_irq_registered = TRUE;

    return 0;
}
2568 
/* Tear down the OOB host-wake interrupt: disarm it as a wakeup source,
 * disable it if currently enabled, and free the IRQ. Logs an error and
 * returns early if it was never registered or handles are missing.
 */
void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
{
    int err = 0;
    dhdpcie_info_t *pch;
    dhdpcie_os_info_t *dhdpcie_osinfo;

    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
    if (bus == NULL) {
        DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
        return;
    }

    if (bus->dev == NULL) {
        DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
        return;
    }

    pch = pci_get_drvdata(bus->dev);
    if (pch == NULL) {
        DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
        return;
    }

    dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
    if (!dhdpcie_osinfo->oob_irq_registered) {
        DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
        return;
    }
    if (dhdpcie_osinfo->oob_irq_num > 0) {
        if (dhdpcie_osinfo->oob_irq_wake_enabled) {
            /* wake flag is only cleared when disable_irq_wake() succeeds */
            err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
            if (!err) {
                dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
            }
        }
        if (dhdpcie_osinfo->oob_irq_enabled) {
            disable_irq(dhdpcie_osinfo->oob_irq_num);
            dhdpcie_osinfo->oob_irq_enabled = FALSE;
        }
        free_irq(dhdpcie_osinfo->oob_irq_num, bus);
    }
    dhdpcie_osinfo->oob_irq_registered = FALSE;
}
2612 #endif /* BCMPCIE_OOB_HOST_WAKE */
2613 
/* Return the generic struct device embedded in this bus's pci_dev,
 * or NULL when the bus has no PCI device attached.
 */
struct device *dhd_bus_to_dev(dhd_bus_t *bus)
{
    struct pci_dev *pcidev = bus->dev;

    return (pcidev != NULL) ? &pcidev->dev : NULL;
}
2625 
2626 #define KIRQ_PRINT_BUF_LEN 256
2627 
/* Dump /proc/interrupts-style statistics for the given IRQ through
 * DHD_ERROR: per-online-CPU counts, the irqchip name, and (on >= 3.1
 * kernels) the hwirq number and, when available, the trigger type.
 */
void dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
{
    unsigned long flags = 0;
    struct irq_desc *desc;
    int i; /* cpu iterator */
    struct bcmstrbuf strbuf;
    char tmp_buf[KIRQ_PRINT_BUF_LEN];

    desc = irq_to_desc(irq_num);
    if (!desc) {
        DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
        return;
    }
    bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
    /* hold the descriptor lock so the stats are sampled consistently */
    raw_spin_lock_irqsave(&desc->lock, flags);
    bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
    for_each_online_cpu(i)
        bcm_bprintf(&strbuf, "%10u ",
                    desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
    if (desc->irq_data.chip) {
        if (desc->irq_data.chip->name) {
            bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
        } else {
            bcm_bprintf(&strbuf, " %8s", "-");
        }
    } else {
        bcm_bprintf(&strbuf, " %8s", "None");
    }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
    if (desc->irq_data.domain) {
        bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
    }
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
    bcm_bprintf(&strbuf, " %-8s",
                irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif // endif
#endif /* LINUX VERSION > 3.1.0 */

    if (desc->name) {
        bcm_bprintf(&strbuf, "-%-8s", desc->name);
    }

    DHD_ERROR(("%s\n", strbuf.origbuf));
    raw_spin_unlock_irqrestore(&desc->lock, flags);
}
2673 
/*
 * Dump kernel IRQ statistics for the bus data interrupt and, when
 * BCMPCIE_OOB_HOST_WAKE is enabled, for the out-of-band host-wake IRQ.
 *
 * dhd : DHD public context; dhd->bus (PCIe) or dhd->info->adapter (SDIO)
 *       supplies the IRQ number.
 */
void dhd_show_kirqstats(dhd_pub_t *dhd)
{
    /*
     * (unsigned int)-1 (i.e. UINT_MAX) is the "no IRQ assigned" sentinel.
     * The explicit cast avoids the signed/unsigned comparison the original
     * `irq != -1` relied on implicitly (-Wsign-compare).
     */
    unsigned int irq = (unsigned int)-1;
#ifdef BCMPCIE
    dhdpcie_get_pcieirq(dhd->bus, &irq);
#endif /* BCMPCIE */
#ifdef BCMSDIO
    irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
#endif /* BCMSDIO */
    if (irq != (unsigned int)-1) {
#ifdef BCMPCIE
        DHD_ERROR(("DUMP data kernel irq stats : \n"));
#endif /* BCMPCIE */
#ifdef BCMSDIO
        DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
#endif /* BCMSDIO */
        dhd_print_kirqstats(dhd, irq);
    }
#ifdef BCMPCIE_OOB_HOST_WAKE
    /* OOB host-wake IRQ: 0 means "not configured" here. */
    irq = dhdpcie_get_oob_irq_num(dhd->bus);
    if (irq) {
        DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
        dhd_print_kirqstats(dhd, irq);
    }
#endif /* BCMPCIE_OOB_HOST_WAKE */
}
2700 
#ifdef DHD_FW_COREDUMP
/*
 * Externally-triggered dongle memory dump (exported symbol).
 *
 * Dumps the firmware console buffer and protocol debug info, then forces
 * a memory dump with type DUMP_TYPE_AP_ABNORMAL_ACCESS and the
 * DUMP_MEMFILE_BUGON policy.
 *
 * Returns 0 on success, -ENODEV when the global bus handle is not set.
 */
int dhd_dongle_mem_dump(void)
{
    dhd_pub_t *dhdp;

    if (g_dhd_bus == NULL) {
        DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
        return -ENODEV;
    }
    dhdp = g_dhd_bus->dhd;

    dhd_bus_dump_console_buffer(g_dhd_bus);
    dhd_prot_debug_info_print(dhdp);

    dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
    dhdp->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;

    dhd_bus_mem_dump(dhdp);
    return 0;
}
EXPORT_SYMBOL(dhd_dongle_mem_dump);
#endif /* DHD_FW_COREDUMP */
2720 
dhd_bus_check_driver_up(void)2721 bool dhd_bus_check_driver_up(void)
2722 {
2723     dhd_bus_t *bus;
2724     dhd_pub_t *dhdp;
2725     bool isup = FALSE;
2726 
2727     bus = (dhd_bus_t *)g_dhd_bus;
2728     if (!bus) {
2729         DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2730         return isup;
2731     }
2732 
2733     dhdp = bus->dhd;
2734     if (dhdp) {
2735         isup = dhdp->up;
2736     }
2737 
2738     return isup;
2739 }
2740 EXPORT_SYMBOL(dhd_bus_check_driver_up);
2741