1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2014 Broadcom Corporation
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/firmware.h>
9 #include <linux/pci.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/interrupt.h>
13 #include <linux/bcma/bcma.h>
14 #include <linux/sched.h>
15 #include <linux/io.h>
16 #include <asm/unaligned.h>
17
18 #include <soc.h>
19 #include <chipcommon.h>
20 #include <brcmu_utils.h>
21 #include <brcmu_wifi.h>
22 #include <brcm_hw_ids.h>
23
24 /* Custom brcmf_err() that takes bus arg and passes it further */
25 #define brcmf_err(bus, fmt, ...) \
26 do { \
27 if (IS_ENABLED(CONFIG_BRCMDBG) || \
28 IS_ENABLED(CONFIG_BRCM_TRACING) || \
29 net_ratelimit()) \
30 __brcmf_err(bus, __func__, fmt, ##__VA_ARGS__); \
31 } while (0)
32
33 #include "debug.h"
34 #include "bus.h"
35 #include "commonring.h"
36 #include "msgbuf.h"
37 #include "pcie.h"
38 #include "firmware.h"
39 #include "chip.h"
40 #include "core.h"
41 #include "common.h"
42
43
44 enum brcmf_pcie_state {
45 BRCMFMAC_PCIE_STATE_DOWN,
46 BRCMFMAC_PCIE_STATE_UP
47 };
48
49 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
50 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
51 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
52 BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
53 BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
54 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
55 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
56 BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
57 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
58 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
59 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
60 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
61 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
62
63 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
64 BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
65 BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
66 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
67 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
68 BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
69 BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
70 BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
71 BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
72 BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
73 BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
74 BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
75 BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
76 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
77 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
78 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
79 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
80 BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
81 BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
82 };
83
84 #define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
85
86 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
87
88 /* backplane address space accessed by BAR0 */
89 #define BRCMF_PCIE_BAR0_WINDOW 0x80
90 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
91 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
92
93 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
94 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
95
96 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
97 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
98
99 #define BRCMF_PCIE_REG_INTSTATUS 0x90
100 #define BRCMF_PCIE_REG_INTMASK 0x94
101 #define BRCMF_PCIE_REG_SBMBX 0x98
102
103 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
104
105 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
106 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
107 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
108 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
109 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
110 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
111 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
112
113 #define BRCMF_PCIE2_INTA 0x01
114 #define BRCMF_PCIE2_INTB 0x02
115
116 #define BRCMF_PCIE_INT_0 0x01
117 #define BRCMF_PCIE_INT_1 0x02
118 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
119 BRCMF_PCIE_INT_1)
120
121 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
122 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
123 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
124 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
125 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
126 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
127 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
128 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
129 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
130 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
131
132 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
133 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
134 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
135 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
136 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
137 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
138 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
139 BRCMF_PCIE_MB_INT_D2H3_DB1)
140
141 #define BRCMF_PCIE_SHARED_VERSION_7 7
142 #define BRCMF_PCIE_MIN_SHARED_VERSION 5
143 #define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
144 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
145 #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
146 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
147 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
148
149 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
150 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
151
152 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
153 #define BRCMF_SHARED_RING_BASE_OFFSET 52
154 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
155 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
156 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
157 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
158 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
159 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
160 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
161 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
162 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
163
164 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
165 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
166 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
167 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
168
169 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
170 #define BRCMF_RING_MAX_ITEM_OFFSET 4
171 #define BRCMF_RING_LEN_ITEMS_OFFSET 6
172 #define BRCMF_RING_MEM_SZ 16
173 #define BRCMF_RING_STATE_SZ 8
174
175 #define BRCMF_DEF_MAX_RXBUFPOST 255
176
177 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
178 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
179 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
180
181 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
182 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
183
184 #define BRCMF_D2H_DEV_D3_ACK 0x00000001
185 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
186 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
187 #define BRCMF_D2H_DEV_FWHALT 0x10000000
188
189 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
190 #define BRCMF_H2D_HOST_DS_ACK 0x00000002
191 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
192 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
193
194 #define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000)
195
196 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
197 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
198 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
199 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
200 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
201 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
202 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
203 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
204 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
205 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
206 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
207 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
208 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
209
210 /* Magic number at a magic location to find RAM size */
211 #define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
212 #define BRCMF_RAMSIZE_OFFSET 0x6c
213
214
215 struct brcmf_pcie_console {
216 u32 base_addr;
217 u32 buf_addr;
218 u32 bufsize;
219 u32 read_idx;
220 u8 log_str[256];
221 u8 log_idx;
222 };
223
224 struct brcmf_pcie_shared_info {
225 u32 tcm_base_address;
226 u32 flags;
227 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
228 struct brcmf_pcie_ringbuf *flowrings;
229 u16 max_rxbufpost;
230 u16 max_flowrings;
231 u16 max_submissionrings;
232 u16 max_completionrings;
233 u32 rx_dataoffset;
234 u32 htod_mb_data_addr;
235 u32 dtoh_mb_data_addr;
236 u32 ring_info_addr;
237 struct brcmf_pcie_console console;
238 void *scratch;
239 dma_addr_t scratch_dmahandle;
240 void *ringupd;
241 dma_addr_t ringupd_dmahandle;
242 u8 version;
243 };
244
245 struct brcmf_pcie_core_info {
246 u32 base;
247 u32 wrapbase;
248 };
249
250 struct brcmf_pciedev_info {
251 enum brcmf_pcie_state state;
252 bool in_irq;
253 struct pci_dev *pdev;
254 char fw_name[BRCMF_FW_NAME_LEN];
255 char nvram_name[BRCMF_FW_NAME_LEN];
256 void __iomem *regs;
257 void __iomem *tcm;
258 u32 ram_base;
259 u32 ram_size;
260 struct brcmf_chip *ci;
261 u32 coreid;
262 struct brcmf_pcie_shared_info shared;
263 wait_queue_head_t mbdata_resp_wait;
264 bool mbdata_completed;
265 bool irq_allocated;
266 bool wowl_enabled;
267 u8 dma_idx_sz;
268 void *idxbuf;
269 u32 idxbuf_sz;
270 dma_addr_t idxbuf_dmahandle;
271 u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
272 void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
273 u16 value);
274 struct brcmf_mp_device *settings;
275 };
276
277 struct brcmf_pcie_ringbuf {
278 struct brcmf_commonring commonring;
279 dma_addr_t dma_handle;
280 u32 w_idx_addr;
281 u32 r_idx_addr;
282 struct brcmf_pciedev_info *devinfo;
283 u8 id;
284 };
285
286 /**
287 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
288 *
289 * @ringmem: dongle memory pointer to ring memory location
290 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
291 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
292 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
293 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
294 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
295 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
296 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
297 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
298 * @max_flowrings: maximum number of tx flow rings supported.
299 * @max_submissionrings: maximum number of submission rings (h2d) supported.
300 * @max_completionrings: maximum number of completion rings (d2h) supported.
301 */
302 struct brcmf_pcie_dhi_ringinfo {
303 __le32 ringmem;
304 __le32 h2d_w_idx_ptr;
305 __le32 h2d_r_idx_ptr;
306 __le32 d2h_w_idx_ptr;
307 __le32 d2h_r_idx_ptr;
308 struct msgbuf_buf_addr h2d_w_idx_hostaddr;
309 struct msgbuf_buf_addr h2d_r_idx_hostaddr;
310 struct msgbuf_buf_addr d2h_w_idx_hostaddr;
311 struct msgbuf_buf_addr d2h_r_idx_hostaddr;
312 __le16 max_flowrings;
313 __le16 max_submissionrings;
314 __le16 max_completionrings;
315 };
316
317 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
318 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
319 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
320 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
321 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
322 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
323 };
324
325 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
326 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
327 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
328 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
329 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
330 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
331 };
332
333 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
334 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
335 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
336 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
337 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
338 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
339 };
340
341 static void brcmf_pcie_setup(struct device *dev, int ret,
342 struct brcmf_fw_request *fwreq);
343 static struct brcmf_fw_request *
344 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
345
346 static u32
347 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
348 {
349 void __iomem *address = devinfo->regs + reg_offset;
350
351 return (ioread32(address));
352 }
353
354
355 static void
356 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
357 u32 value)
358 {
359 void __iomem *address = devinfo->regs + reg_offset;
360
361 iowrite32(value, address);
362 }
363
364
365 static u8
366 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
367 {
368 void __iomem *address = devinfo->tcm + mem_offset;
369
370 return (ioread8(address));
371 }
372
373
374 static u16
375 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
376 {
377 void __iomem *address = devinfo->tcm + mem_offset;
378
379 return (ioread16(address));
380 }
381
382
383 static void
384 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
385 u16 value)
386 {
387 void __iomem *address = devinfo->tcm + mem_offset;
388
389 iowrite16(value, address);
390 }
391
392
393 static u16
394 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
395 {
396 u16 *address = devinfo->idxbuf + mem_offset;
397
398 return (*(address));
399 }
400
401
402 static void
403 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
404 u16 value)
405 {
406 u16 *address = devinfo->idxbuf + mem_offset;
407
408 *(address) = value;
409 }
410
411
412 static u32
413 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
414 {
415 void __iomem *address = devinfo->tcm + mem_offset;
416
417 return (ioread32(address));
418 }
419
420
421 static void
422 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
423 u32 value)
424 {
425 void __iomem *address = devinfo->tcm + mem_offset;
426
427 iowrite32(value, address);
428 }
429
430
431 static u32
432 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
433 {
434 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
435
436 return (ioread32(addr));
437 }
438
439
440 static void
441 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
442 u32 value)
443 {
444 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
445
446 iowrite32(value, addr);
447 }
448
449
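/* Copy a block from device TCM into host memory. The access width (32-, 16-
 * or 8-bit reads) is chosen from the alignment bits of the source address,
 * destination address and length.
 */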
450 static void
451 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
452 void *dstaddr, u32 len)
453 {
454 void __iomem *address = devinfo->tcm + mem_offset;
455 __le32 *dst32;
456 __le16 *dst16;
457 u8 *dst8;
458
459 if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
460 if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
461 dst8 = (u8 *)dstaddr;
462 while (len) {
463 *dst8 = ioread8(address);
464 address++;
465 dst8++;
466 len--;
467 }
468 } else {
469 len = len / 2;
470 dst16 = (__le16 *)dstaddr;
471 while (len) {
472 *dst16 = cpu_to_le16(ioread16(address));
473 address += 2;
474 dst16++;
475 len--;
476 }
477 }
478 } else {
479 len = len / 4;
480 dst32 = (__le32 *)dstaddr;
481 while (len) {
482 *dst32 = cpu_to_le32(ioread32(address));
483 address += 4;
484 dst32++;
485 len--;
486 }
487 }
488 }
489
490
491 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
492 CHIPCREGOFFS(reg), value)
493
494
495 static void
496 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
497 {
498 const struct pci_dev *pdev = devinfo->pdev;
499 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
500 struct brcmf_core *core;
501 u32 bar0_win;
502
503 core = brcmf_chip_get_core(devinfo->ci, coreid);
504 if (core) {
505 bar0_win = core->base;
506 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
507 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
508 &bar0_win) == 0) {
509 if (bar0_win != core->base) {
510 bar0_win = core->base;
511 pci_write_config_dword(pdev,
512 BRCMF_PCIE_BAR0_WINDOW,
513 bar0_win);
514 }
515 }
516 } else {
517 brcmf_err(bus, "Unsupported core selected %x\n", coreid);
518 }
519 }
520
521
522 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
523 {
524 struct brcmf_core *core;
525 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
526 BRCMF_PCIE_CFGREG_PM_CSR,
527 BRCMF_PCIE_CFGREG_MSI_CAP,
528 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
529 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
530 BRCMF_PCIE_CFGREG_MSI_DATA,
531 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
532 BRCMF_PCIE_CFGREG_RBAR_CTRL,
533 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
534 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
535 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
536 u32 i;
537 u32 val;
538 u32 lsc;
539
540 if (!devinfo->ci)
541 return;
542
543 /* Disable ASPM */
544 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
545 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
546 &lsc);
547 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
548 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
549 val);
550
551 /* Watchdog reset */
552 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
553 WRITECC32(devinfo, watchdog, 4);
554 msleep(100);
555
556 /* Restore ASPM */
557 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
558 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
559 lsc);
560
561 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
562 if (core->rev <= 13) {
563 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
564 brcmf_pcie_write_reg32(devinfo,
565 BRCMF_PCIE_PCIE2REG_CONFIGADDR,
566 cfg_offset[i]);
567 val = brcmf_pcie_read_reg32(devinfo,
568 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
569 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
570 cfg_offset[i], val);
571 brcmf_pcie_write_reg32(devinfo,
572 BRCMF_PCIE_PCIE2REG_CONFIGDATA,
573 val);
574 }
575 }
576 }
577
578
579 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
580 {
581 u32 config;
582
583 /* BAR1 window may not be sized properly */
584 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
585 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
586 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
587 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
588
589 device_wakeup_enable(&devinfo->pdev->dev);
590 }
591
592
593 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
594 {
595 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
596 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
597 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
598 5);
599 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
600 0);
601 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
602 7);
603 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
604 0);
605 }
606 return 0;
607 }
608
609
610 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
611 u32 resetintr)
612 {
613 struct brcmf_core *core;
614
615 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
616 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
617 brcmf_chip_resetcore(core, 0, 0, 0);
618 }
619
620 if (!brcmf_chip_set_active(devinfo->ci, resetintr))
621 return -EIO;
622 return 0;
623 }
624
625
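/* Write host-to-dongle mailbox data into TCM and ring the SB mailbox,
 * waiting (up to ~1 second) for any previously posted value to be consumed.
 */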
626 static int
627 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
628 {
629 struct brcmf_pcie_shared_info *shared;
630 struct brcmf_core *core;
631 u32 addr;
632 u32 cur_htod_mb_data;
633 u32 i;
634
635 shared = &devinfo->shared;
636 addr = shared->htod_mb_data_addr;
637 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
638
639 if (cur_htod_mb_data != 0)
640 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
641 cur_htod_mb_data);
642
643 i = 0;
644 while (cur_htod_mb_data != 0) {
645 msleep(10);
646 i++;
647 if (i > 100)
648 return -EIO;
649 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
650 }
651
652 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
653 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
654
655 /* Send mailbox interrupt twice as a hardware workaround */
656 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
657 if (core->rev <= 13)
658 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
659
660 return 0;
661 }
662
663
664 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
665 {
666 struct brcmf_pcie_shared_info *shared;
667 u32 addr;
668 u32 dtoh_mb_data;
669
670 shared = &devinfo->shared;
671 addr = shared->dtoh_mb_data_addr;
672 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
673
674 if (!dtoh_mb_data)
675 return;
676
677 brcmf_pcie_write_tcm32(devinfo, addr, 0);
678
679 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
680 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
681 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
682 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
683 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
684 }
685 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
686 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
687 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
688 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
689 devinfo->mbdata_completed = true;
690 wake_up(&devinfo->mbdata_resp_wait);
691 }
692 if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
693 brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
694 brcmf_fw_crashed(&devinfo->pdev->dev);
695 }
696 }
697
698
699 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
700 {
701 struct brcmf_pcie_shared_info *shared;
702 struct brcmf_pcie_console *console;
703 u32 addr;
704
705 shared = &devinfo->shared;
706 console = &shared->console;
707 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
708 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
709
710 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
711 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
712 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
713 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
714
715 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
716 console->base_addr, console->buf_addr, console->bufsize);
717 }
718
719 /**
720 * brcmf_pcie_bus_console_read - reads firmware messages
721 *
722 * @devinfo: pointer to the device info structure
 * @error: specifies if an error has occurred (prints messages unconditionally)
723 */
724 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
725 bool error)
726 {
727 struct pci_dev *pdev = devinfo->pdev;
728 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
729 struct brcmf_pcie_console *console;
730 u32 addr;
731 u8 ch;
732 u32 newidx;
733
734 if (!error && !BRCMF_FWCON_ON())
735 return;
736
737 console = &devinfo->shared.console;
738 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
739 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
740 while (newidx != console->read_idx) {
741 addr = console->buf_addr + console->read_idx;
742 ch = brcmf_pcie_read_tcm8(devinfo, addr);
743 console->read_idx++;
744 if (console->read_idx == console->bufsize)
745 console->read_idx = 0;
746 if (ch == '\r')
747 continue;
748 console->log_str[console->log_idx] = ch;
749 console->log_idx++;
750 if ((ch != '\n') &&
751 (console->log_idx == (sizeof(console->log_str) - 2))) {
752 ch = '\n';
753 console->log_str[console->log_idx] = ch;
754 console->log_idx++;
755 }
756 if (ch == '\n') {
757 console->log_str[console->log_idx] = 0;
758 if (error)
759 __brcmf_err(bus, __func__, "CONSOLE: %s",
760 console->log_str);
761 else
762 pr_debug("CONSOLE: %s", console->log_str);
763 console->log_idx = 0;
764 }
765 }
766 }
767
768
769 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
770 {
771 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
772 }
773
774
775 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
776 {
777 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
778 BRCMF_PCIE_MB_INT_D2H_DB |
779 BRCMF_PCIE_MB_INT_FN0_0 |
780 BRCMF_PCIE_MB_INT_FN0_1);
781 }
782
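/* Notify firmware that the host is ready by ringing H2D mailbox 1, but only
 * when the shared area advertises BRCMF_PCIE_SHARED_HOSTRDY_DB1 support.
 */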
783 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
784 {
785 if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
786 brcmf_pcie_write_reg32(devinfo,
787 BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
788 }
789
790 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
791 {
792 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
793
794 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
795 brcmf_pcie_intr_disable(devinfo);
796 brcmf_dbg(PCIE, "Enter\n");
797 return IRQ_WAKE_THREAD;
798 }
799 return IRQ_NONE;
800 }
801
802
803 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
804 {
805 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
806 u32 status;
807
808 devinfo->in_irq = true;
809 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
810 brcmf_dbg(PCIE, "Enter %x\n", status);
811 if (status) {
812 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
813 status);
814 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
815 BRCMF_PCIE_MB_INT_FN0_1))
816 brcmf_pcie_handle_mb_data(devinfo);
817 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
818 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
819 brcmf_proto_msgbuf_rx_trigger(
820 &devinfo->pdev->dev);
821 }
822 }
823 brcmf_pcie_bus_console_read(devinfo, false);
824 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
825 brcmf_pcie_intr_enable(devinfo);
826 devinfo->in_irq = false;
827 return IRQ_HANDLED;
828 }
829
830
831 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
832 {
833 struct pci_dev *pdev = devinfo->pdev;
834 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
835
836 brcmf_pcie_intr_disable(devinfo);
837
838 brcmf_dbg(PCIE, "Enter\n");
839
840 pci_enable_msi(pdev);
841 if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
842 brcmf_pcie_isr_thread, IRQF_SHARED,
843 "brcmf_pcie_intr", devinfo)) {
844 pci_disable_msi(pdev);
845 brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
846 return -EIO;
847 }
848 devinfo->irq_allocated = true;
849 return 0;
850 }
851
852
853 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
854 {
855 struct pci_dev *pdev = devinfo->pdev;
856 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
857 u32 status;
858 u32 count;
859
860 if (!devinfo->irq_allocated)
861 return;
862
863 brcmf_pcie_intr_disable(devinfo);
864 free_irq(pdev->irq, devinfo);
865 pci_disable_msi(pdev);
866
867 msleep(50);
868 count = 0;
869 while ((devinfo->in_irq) && (count < 20)) {
870 msleep(50);
871 count++;
872 }
873 if (devinfo->in_irq)
874 brcmf_err(bus, "Still in IRQ (processing) !!!\n");
875
876 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
877 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
878
879 devinfo->irq_allocated = false;
880 }
881
882
883 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
884 {
885 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
886 struct brcmf_pciedev_info *devinfo = ring->devinfo;
887 struct brcmf_commonring *commonring = &ring->commonring;
888
889 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
890 return -EIO;
891
892 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
893 commonring->w_ptr, ring->id);
894
895 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
896
897 return 0;
898 }
899
900
901 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
902 {
903 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
904 struct brcmf_pciedev_info *devinfo = ring->devinfo;
905 struct brcmf_commonring *commonring = &ring->commonring;
906
907 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
908 return -EIO;
909
910 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
911 commonring->r_ptr, ring->id);
912
913 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
914
915 return 0;
916 }
917
918
919 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
920 {
921 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
922 struct brcmf_pciedev_info *devinfo = ring->devinfo;
923
924 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
925 return -EIO;
926
927 brcmf_dbg(PCIE, "RING !\n");
928 /* Any arbitrary value will do, let's use 1 */
929 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
930
931 return 0;
932 }
933
934
935 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
936 {
937 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
938 struct brcmf_pciedev_info *devinfo = ring->devinfo;
939 struct brcmf_commonring *commonring = &ring->commonring;
940
941 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
942 return -EIO;
943
944 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
945
946 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
947 commonring->w_ptr, ring->id);
948
949 return 0;
950 }
951
952
953 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
954 {
955 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
956 struct brcmf_pciedev_info *devinfo = ring->devinfo;
957 struct brcmf_commonring *commonring = &ring->commonring;
958
959 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
960 return -EIO;
961
962 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
963
964 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
965 commonring->r_ptr, ring->id);
966
967 return 0;
968 }
969
970
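/* Allocate a coherent DMA buffer and write its 64-bit bus address into the
 * dongle TCM at @tcm_dma_phys_addr so the firmware can locate it.
 */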
971 static void *
972 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
973 u32 size, u32 tcm_dma_phys_addr,
974 dma_addr_t *dma_handle)
975 {
976 void *ring;
977 u64 address;
978
979 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
980 GFP_KERNEL);
981 if (!ring)
982 return NULL;
983
984 address = (u64)*dma_handle;
985 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
986 address & 0xffffffff);
987 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
988
989 return (ring);
990 }
991
992
993 static struct brcmf_pcie_ringbuf *
994 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
995 u32 tcm_ring_phys_addr)
996 {
997 void *dma_buf;
998 dma_addr_t dma_handle;
999 struct brcmf_pcie_ringbuf *ring;
1000 u32 size;
1001 u32 addr;
1002 const u32 *ring_itemsize_array;
1003
1004 if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1005 ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1006 else
1007 ring_itemsize_array = brcmf_ring_itemsize;
1008
1009 size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1010 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1011 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1012 &dma_handle);
1013 if (!dma_buf)
1014 return NULL;
1015
1016 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1017 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1018 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1019 brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1020
1021 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1022 if (!ring) {
1023 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1024 dma_handle);
1025 return NULL;
1026 }
1027 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1028 ring_itemsize_array[ring_id], dma_buf);
1029 ring->dma_handle = dma_handle;
1030 ring->devinfo = devinfo;
1031 brcmf_commonring_register_cb(&ring->commonring,
1032 brcmf_pcie_ring_mb_ring_bell,
1033 brcmf_pcie_ring_mb_update_rptr,
1034 brcmf_pcie_ring_mb_update_wptr,
1035 brcmf_pcie_ring_mb_write_rptr,
1036 brcmf_pcie_ring_mb_write_wptr, ring);
1037
1038 return (ring);
1039 }
1040
1041
1042 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1043 struct brcmf_pcie_ringbuf *ring)
1044 {
1045 void *dma_buf;
1046 u32 size;
1047
1048 if (!ring)
1049 return;
1050
1051 dma_buf = ring->commonring.buf_addr;
1052 if (dma_buf) {
1053 size = ring->commonring.depth * ring->commonring.item_len;
1054 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1055 }
1056 kfree(ring);
1057 }
1058
1059
1060 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1061 {
1062 u32 i;
1063
1064 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1065 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1066 devinfo->shared.commonrings[i]);
1067 devinfo->shared.commonrings[i] = NULL;
1068 }
1069 kfree(devinfo->shared.flowrings);
1070 devinfo->shared.flowrings = NULL;
1071 if (devinfo->idxbuf) {
1072 dma_free_coherent(&devinfo->pdev->dev,
1073 devinfo->idxbuf_sz,
1074 devinfo->idxbuf,
1075 devinfo->idxbuf_dmahandle);
1076 devinfo->idxbuf = NULL;
1077 }
1078 }
1079
1080
1081 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1082 {
1083 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1084 struct brcmf_pcie_ringbuf *ring;
1085 struct brcmf_pcie_ringbuf *rings;
1086 u32 d2h_w_idx_ptr;
1087 u32 d2h_r_idx_ptr;
1088 u32 h2d_w_idx_ptr;
1089 u32 h2d_r_idx_ptr;
1090 u32 ring_mem_ptr;
1091 u32 i;
1092 u64 address;
1093 u32 bufsz;
1094 u8 idx_offset;
1095 struct brcmf_pcie_dhi_ringinfo ringinfo;
1096 u16 max_flowrings;
1097 u16 max_submissionrings;
1098 u16 max_completionrings;
1099
1100 memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1101 sizeof(ringinfo));
1102 if (devinfo->shared.version >= 6) {
1103 max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1104 max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1105 max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1106 } else {
1107 max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1108 max_flowrings = max_submissionrings -
1109 BRCMF_NROF_H2D_COMMON_MSGRINGS;
1110 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1111 }
1112 if (max_flowrings > 512) {
1113 brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
1114 return -EIO;
1115 }
1116
1117 if (devinfo->dma_idx_sz != 0) {
1118 bufsz = (max_submissionrings + max_completionrings) *
1119 devinfo->dma_idx_sz * 2;
1120 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1121 &devinfo->idxbuf_dmahandle,
1122 GFP_KERNEL);
1123 if (!devinfo->idxbuf)
1124 devinfo->dma_idx_sz = 0;
1125 }
1126
1127 if (devinfo->dma_idx_sz == 0) {
1128 d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1129 d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1130 h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1131 h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1132 idx_offset = sizeof(u32);
1133 devinfo->write_ptr = brcmf_pcie_write_tcm16;
1134 devinfo->read_ptr = brcmf_pcie_read_tcm16;
1135 brcmf_dbg(PCIE, "Using TCM indices\n");
1136 } else {
1137 memset(devinfo->idxbuf, 0, bufsz);
1138 devinfo->idxbuf_sz = bufsz;
1139 idx_offset = devinfo->dma_idx_sz;
1140 devinfo->write_ptr = brcmf_pcie_write_idx;
1141 devinfo->read_ptr = brcmf_pcie_read_idx;
1142
1143 h2d_w_idx_ptr = 0;
1144 address = (u64)devinfo->idxbuf_dmahandle;
1145 ringinfo.h2d_w_idx_hostaddr.low_addr =
1146 cpu_to_le32(address & 0xffffffff);
1147 ringinfo.h2d_w_idx_hostaddr.high_addr =
1148 cpu_to_le32(address >> 32);
1149
1150 h2d_r_idx_ptr = h2d_w_idx_ptr +
1151 max_submissionrings * idx_offset;
1152 address += max_submissionrings * idx_offset;
1153 ringinfo.h2d_r_idx_hostaddr.low_addr =
1154 cpu_to_le32(address & 0xffffffff);
1155 ringinfo.h2d_r_idx_hostaddr.high_addr =
1156 cpu_to_le32(address >> 32);
1157
1158 d2h_w_idx_ptr = h2d_r_idx_ptr +
1159 max_submissionrings * idx_offset;
1160 address += max_submissionrings * idx_offset;
1161 ringinfo.d2h_w_idx_hostaddr.low_addr =
1162 cpu_to_le32(address & 0xffffffff);
1163 ringinfo.d2h_w_idx_hostaddr.high_addr =
1164 cpu_to_le32(address >> 32);
1165
1166 d2h_r_idx_ptr = d2h_w_idx_ptr +
1167 max_completionrings * idx_offset;
1168 address += max_completionrings * idx_offset;
1169 ringinfo.d2h_r_idx_hostaddr.low_addr =
1170 cpu_to_le32(address & 0xffffffff);
1171 ringinfo.d2h_r_idx_hostaddr.high_addr =
1172 cpu_to_le32(address >> 32);
1173
1174 memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1175 &ringinfo, sizeof(ringinfo));
1176 brcmf_dbg(PCIE, "Using host memory indices\n");
1177 }
1178
1179 ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1180
1181 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1182 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1183 if (!ring)
1184 goto fail;
1185 ring->w_idx_addr = h2d_w_idx_ptr;
1186 ring->r_idx_addr = h2d_r_idx_ptr;
1187 ring->id = i;
1188 devinfo->shared.commonrings[i] = ring;
1189
1190 h2d_w_idx_ptr += idx_offset;
1191 h2d_r_idx_ptr += idx_offset;
1192 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1193 }
1194
1195 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1196 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1197 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1198 if (!ring)
1199 goto fail;
1200 ring->w_idx_addr = d2h_w_idx_ptr;
1201 ring->r_idx_addr = d2h_r_idx_ptr;
1202 ring->id = i;
1203 devinfo->shared.commonrings[i] = ring;
1204
1205 d2h_w_idx_ptr += idx_offset;
1206 d2h_r_idx_ptr += idx_offset;
1207 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1208 }
1209
1210 devinfo->shared.max_flowrings = max_flowrings;
1211 devinfo->shared.max_submissionrings = max_submissionrings;
1212 devinfo->shared.max_completionrings = max_completionrings;
1213 rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1214 if (!rings)
1215 goto fail;
1216
1217 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1218
1219 for (i = 0; i < max_flowrings; i++) {
1220 ring = &rings[i];
1221 ring->devinfo = devinfo;
1222 ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1223 brcmf_commonring_register_cb(&ring->commonring,
1224 brcmf_pcie_ring_mb_ring_bell,
1225 brcmf_pcie_ring_mb_update_rptr,
1226 brcmf_pcie_ring_mb_update_wptr,
1227 brcmf_pcie_ring_mb_write_rptr,
1228 brcmf_pcie_ring_mb_write_wptr,
1229 ring);
1230 ring->w_idx_addr = h2d_w_idx_ptr;
1231 ring->r_idx_addr = h2d_r_idx_ptr;
1232 h2d_w_idx_ptr += idx_offset;
1233 h2d_r_idx_ptr += idx_offset;
1234 }
1235 devinfo->shared.flowrings = rings;
1236
1237 return 0;
1238
1239 fail:
1240 brcmf_err(bus, "Allocating ring buffers failed\n");
1241 brcmf_pcie_release_ringbuffers(devinfo);
1242 return -ENOMEM;
1243 }
1244
1245
1246 static void
1247 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1248 {
1249 if (devinfo->shared.scratch)
1250 dma_free_coherent(&devinfo->pdev->dev,
1251 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1252 devinfo->shared.scratch,
1253 devinfo->shared.scratch_dmahandle);
1254 if (devinfo->shared.ringupd)
1255 dma_free_coherent(&devinfo->pdev->dev,
1256 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1257 devinfo->shared.ringupd,
1258 devinfo->shared.ringupd_dmahandle);
1259 }
1260
1261 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1262 {
1263 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1264 u64 address;
1265 u32 addr;
1266
1267 devinfo->shared.scratch =
1268 dma_alloc_coherent(&devinfo->pdev->dev,
1269 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1270 &devinfo->shared.scratch_dmahandle,
1271 GFP_KERNEL);
1272 if (!devinfo->shared.scratch)
1273 goto fail;
1274
1275 addr = devinfo->shared.tcm_base_address +
1276 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1277 address = (u64)devinfo->shared.scratch_dmahandle;
1278 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1279 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1280 addr = devinfo->shared.tcm_base_address +
1281 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1282 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1283
1284 devinfo->shared.ringupd =
1285 dma_alloc_coherent(&devinfo->pdev->dev,
1286 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1287 &devinfo->shared.ringupd_dmahandle,
1288 GFP_KERNEL);
1289 if (!devinfo->shared.ringupd)
1290 goto fail;
1291
1292 addr = devinfo->shared.tcm_base_address +
1293 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1294 address = (u64)devinfo->shared.ringupd_dmahandle;
1295 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1296 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1297 addr = devinfo->shared.tcm_base_address +
1298 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1299 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1300 return 0;
1301
1302 fail:
1303 brcmf_err(bus, "Allocating scratch buffers failed\n");
1304 brcmf_pcie_release_scratchbuffers(devinfo);
1305 return -ENOMEM;
1306 }
1307
1308
1309 static void brcmf_pcie_down(struct device *dev)
1310 {
1311 }
1312
1313 static int brcmf_pcie_preinit(struct device *dev)
1314 {
1315 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1316 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1317
1318 brcmf_dbg(PCIE, "Enter\n");
1319
1320 brcmf_pcie_intr_enable(buspub->devinfo);
1321 brcmf_pcie_hostready(buspub->devinfo);
1322
1323 return 0;
1324 }
1325
1326 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1327 {
1328 return 0;
1329 }
1330
1331
1332 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1333 uint len)
1334 {
1335 return 0;
1336 }
1337
1338
1339 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1340 uint len)
1341 {
1342 return 0;
1343 }
1344
1345
1346 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1347 {
1348 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1349 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1350 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1351
1352 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1353 devinfo->wowl_enabled = enabled;
1354 }
1355
1356
1357 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1358 {
1359 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1360 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1361 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1362
1363 return devinfo->ci->ramsize - devinfo->ci->srsize;
1364 }
1365
1366
1367 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1368 {
1369 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1370 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1371 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1372
1373 brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1374 brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1375 return 0;
1376 }
1377
1378 static
1379 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1380 {
1381 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1382 struct brcmf_fw_request *fwreq;
1383 struct brcmf_fw_name fwnames[] = {
1384 { ext, fw_name },
1385 };
1386
1387 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1388 brcmf_pcie_fwnames,
1389 ARRAY_SIZE(brcmf_pcie_fwnames),
1390 fwnames, ARRAY_SIZE(fwnames));
1391 if (!fwreq)
1392 return -ENOMEM;
1393
1394 kfree(fwreq);
1395 return 0;
1396 }
1397
1398 static int brcmf_pcie_reset(struct device *dev)
1399 {
1400 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1401 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1402 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1403 struct brcmf_fw_request *fwreq;
1404 int err;
1405
1406 brcmf_pcie_intr_disable(devinfo);
1407
1408 brcmf_pcie_bus_console_read(devinfo, true);
1409
1410 brcmf_detach(dev);
1411
1412 brcmf_pcie_release_irq(devinfo);
1413 brcmf_pcie_release_scratchbuffers(devinfo);
1414 brcmf_pcie_release_ringbuffers(devinfo);
1415 brcmf_pcie_reset_device(devinfo);
1416
1417 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1418 if (!fwreq) {
1419 dev_err(dev, "Failed to prepare FW request\n");
1420 return -ENOMEM;
1421 }
1422
1423 err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
1424 if (err) {
1425 dev_err(dev, "Failed to prepare FW request\n");
1426 kfree(fwreq);
1427 }
1428
1429 return err;
1430 }
1431
1432 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1433 .preinit = brcmf_pcie_preinit,
1434 .txdata = brcmf_pcie_tx,
1435 .stop = brcmf_pcie_down,
1436 .txctl = brcmf_pcie_tx_ctlpkt,
1437 .rxctl = brcmf_pcie_rx_ctlpkt,
1438 .wowl_config = brcmf_pcie_wowl_config,
1439 .get_ramsize = brcmf_pcie_get_ramsize,
1440 .get_memdump = brcmf_pcie_get_memdump,
1441 .get_fwname = brcmf_pcie_get_fwname,
1442 .reset = brcmf_pcie_reset,
1443 };
1444
1445
1446 static void
1447 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1448 u32 data_len)
1449 {
1450 __le32 *field;
1451 u32 newsize;
1452
1453 if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1454 return;
1455
1456 field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1457 if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1458 return;
1459 field++;
1460 newsize = le32_to_cpup(field);
1461
1462 brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1463 newsize);
1464 devinfo->ci->ramsize = newsize;
1465 }
1466
1467
1468 static int
1469 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1470 u32 sharedram_addr)
1471 {
1472 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1473 struct brcmf_pcie_shared_info *shared;
1474 u32 addr;
1475
1476 shared = &devinfo->shared;
1477 shared->tcm_base_address = sharedram_addr;
1478
1479 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1480 shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1481 brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1482 if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1483 (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1484 brcmf_err(bus, "Unsupported PCIE version %d\n",
1485 shared->version);
1486 return -EINVAL;
1487 }
1488
1489 /* check whether firmware supports DMA indices */
1490 if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1491 if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1492 devinfo->dma_idx_sz = sizeof(u16);
1493 else
1494 devinfo->dma_idx_sz = sizeof(u32);
1495 }
1496
1497 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1498 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1499 if (shared->max_rxbufpost == 0)
1500 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1501
1502 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1503 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1504
1505 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1506 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1507
1508 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1509 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1510
1511 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1512 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1513
1514 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1515 shared->max_rxbufpost, shared->rx_dataoffset);
1516
1517 brcmf_pcie_bus_console_init(devinfo);
1518
1519 return 0;
1520 }
1521
1522
1523 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1524 const struct firmware *fw, void *nvram,
1525 u32 nvram_len)
1526 {
1527 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1528 u32 sharedram_addr;
1529 u32 sharedram_addr_written;
1530 u32 loop_counter;
1531 int err;
1532 u32 address;
1533 u32 resetintr;
1534
1535 brcmf_dbg(PCIE, "Halt ARM.\n");
1536 err = brcmf_pcie_enter_download_state(devinfo);
1537 if (err)
1538 return err;
1539
1540 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1541 memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
1542 (void *)fw->data, fw->size);
1543
1544 resetintr = get_unaligned_le32(fw->data);
1545 release_firmware(fw);
1546
1547 /* Reset the last 4 bytes of RAM, which will hold the shared
1548 * area address. This identifies when the FW is running.
1549 */
1550 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1551
1552 if (nvram) {
1553 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1554 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1555 nvram_len;
1556 memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
1557 brcmf_fw_nvram_free(nvram);
1558 } else {
1559 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1560 devinfo->nvram_name);
1561 }
1562
1563 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1564 devinfo->ci->ramsize -
1565 4);
1566 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1567 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1568 if (err)
1569 return err;
1570
1571 brcmf_dbg(PCIE, "Wait for FW init\n");
1572 sharedram_addr = sharedram_addr_written;
1573 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1574 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1575 msleep(50);
1576 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1577 devinfo->ci->ramsize -
1578 4);
1579 loop_counter--;
1580 }
1581 if (sharedram_addr == sharedram_addr_written) {
1582 brcmf_err(bus, "FW failed to initialize\n");
1583 return -ENODEV;
1584 }
1585 if (sharedram_addr < devinfo->ci->rambase ||
1586 sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
1587 brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
1588 sharedram_addr);
1589 return -ENODEV;
1590 }
1591 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1592
1593 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1594 }
1595
1596
1597 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1598 {
1599 struct pci_dev *pdev = devinfo->pdev;
1600 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
1601 int err;
1602 phys_addr_t bar0_addr, bar1_addr;
1603 ulong bar1_size;
1604
1605 err = pci_enable_device(pdev);
1606 if (err) {
1607 brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
1608 return err;
1609 }
1610
1611 pci_set_master(pdev);
1612
1613 /* Bar-0 mapped address */
1614 bar0_addr = pci_resource_start(pdev, 0);
1615 /* Bar-1 mapped address */
1616 bar1_addr = pci_resource_start(pdev, 2);
1617 /* read Bar-1 mapped memory range */
1618 bar1_size = pci_resource_len(pdev, 2);
1619 if ((bar1_size == 0) || (bar1_addr == 0)) {
1620 brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1621 bar1_size, (unsigned long long)bar1_addr);
1622 return -EINVAL;
1623 }
1624
1625 devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1626 devinfo->tcm = ioremap(bar1_addr, bar1_size);
1627
1628 if (!devinfo->regs || !devinfo->tcm) {
1629 brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
1630 devinfo->tcm);
1631 return -EINVAL;
1632 }
1633 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1634 devinfo->regs, (unsigned long long)bar0_addr);
1635 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1636 devinfo->tcm, (unsigned long long)bar1_addr,
1637 (unsigned int)bar1_size);
1638
1639 return 0;
1640 }
1641
1642
1643 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1644 {
1645 if (devinfo->tcm)
1646 iounmap(devinfo->tcm);
1647 if (devinfo->regs)
1648 iounmap(devinfo->regs);
1649
1650 pci_disable_device(devinfo->pdev);
1651 }
1652
1653
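/* Slide the BAR0 window to the 4KB-aligned region containing @addr and
 * return the offset of @addr within that window.
 */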
1654 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1655 {
1656 u32 ret_addr;
1657
1658 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1659 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1660 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1661
1662 return ret_addr;
1663 }
1664
1665
1666 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1667 {
1668 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1669
1670 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1671 return brcmf_pcie_read_reg32(devinfo, addr);
1672 }
1673
1674
1675 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1676 {
1677 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1678
1679 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1680 brcmf_pcie_write_reg32(devinfo, addr, value);
1681 }
1682
1683
1684 static int brcmf_pcie_buscoreprep(void *ctx)
1685 {
1686 return brcmf_pcie_get_resource(ctx);
1687 }
1688
1689
1690 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1691 {
1692 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1693 u32 val;
1694
1695 devinfo->ci = chip;
1696 brcmf_pcie_reset_device(devinfo);
1697
1698 val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1699 if (val != 0xffffffff)
1700 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1701 val);
1702
1703 return 0;
1704 }
1705
1706
1707 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1708 u32 rstvec)
1709 {
1710 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1711
1712 brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1713 }
1714
1715
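/* Callbacks handed to brcmf_chip_attach(); the ctx argument they receive is
 * the struct brcmf_pciedev_info passed in brcmf_pcie_probe(). A rough usage
 * sketch (illustrative only, not the actual chip.c code; core_base,
 * reg_offset and some_bit are made-up names):
 *
 *	ops->prepare(ctx);
 *	val = ops->read32(ctx, core_base + reg_offset);
 *	ops->write32(ctx, core_base + reg_offset, val | some_bit);
 */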
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};

#define BRCMF_PCIE_FW_CODE	0
#define BRCMF_PCIE_FW_NVRAM	1

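/* Completion callback passed to brcmf_fw_get_firmwares(): runs once the
 * firmware (and optional NVRAM) request finishes, downloads the images and
 * brings the msgbuf/commonring machinery up before attaching to the core
 * driver. On any failure the driver is released from the device.
 */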
static void brcmf_pcie_setup(struct device *dev, int ret,
			     struct brcmf_fw_request *fwreq)
{
	const struct firmware *fw;
	void *nvram;
	struct brcmf_bus *bus;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_commonring **flowrings;
	u32 i, nvram_len;

	/* check firmware loading result */
	if (ret)
		goto fail;

	bus = dev_get_drvdata(dev);
	pcie_bus_dev = bus->bus_priv.pcie;
	devinfo = pcie_bus_dev->devinfo;
	brcmf_pcie_attach(devinfo);

	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
	kfree(fwreq);

	ret = brcmf_chip_get_raminfo(devinfo->ci);
	if (ret) {
		brcmf_err(bus, "Failed to get RAM info\n");
		release_firmware(fw);
		brcmf_fw_nvram_free(nvram);
		goto fail;
	}

	/* Some firmware images have the size of the device memory defined
	 * inside the firmware itself. This is because part of the device
	 * memory is shared and the division is determined by the firmware.
	 * Parse the firmware and adjust the chip memory size now.
	 */
	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);

	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure. */
	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
		bus->msgbuf->commonrings[i] =
				&devinfo->shared.commonrings[i]->commonring;

	flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
			    GFP_KERNEL);
	if (!flowrings)
		goto fail;

	for (i = 0; i < devinfo->shared.max_flowrings; i++)
		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
	bus->msgbuf->flowrings = flowrings;

	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
	bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;

	init_waitqueue_head(&devinfo->mbdata_resp_wait);

	ret = brcmf_attach(&devinfo->pdev->dev);
	if (ret)
		goto fail;

	brcmf_pcie_bus_console_read(devinfo, false);

	return;

fail:
	device_release_driver(dev);
}

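/* Build the firmware request: a mandatory firmware binary (.bin) plus an
 * optional NVRAM file (.txt), selected from brcmf_pcie_fwnames by chip id
 * and revision.
 */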
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_fw_request *fwreq;
	struct brcmf_fw_name fwnames[] = {
		{ ".bin", devinfo->fw_name },
		{ ".txt", devinfo->nvram_name },
	};

	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
				       brcmf_pcie_fwnames,
				       ARRAY_SIZE(brcmf_pcie_fwnames),
				       fwnames, ARRAY_SIZE(fwnames));
	if (!fwreq)
		return NULL;

	fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
	fwreq->board_type = devinfo->settings->board_type;
	/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
	fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
	fwreq->bus_nr = devinfo->pdev->bus->number;

	return fwreq;
}

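/* PCI probe: attach the chip abstraction (whose prepare callback maps the
 * BARs), allocate the bus and msgbuf structures, then kick off an
 * asynchronous firmware request; the remaining setup happens in
 * brcmf_pcie_setup() once the firmware arrives.
 */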
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_fw_request *fwreq;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
						   BRCMF_BUSTYPE_PCIE,
						   devinfo->ci->chip,
						   devinfo->ci->chiprev);
	if (!devinfo->settings) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_alloc(&devinfo->pdev->dev, devinfo->settings);
	if (ret)
		goto fail_bus;

	fwreq = brcmf_pcie_prepare_fw_request(devinfo);
	if (!fwreq) {
		ret = -ENOMEM;
		goto fail_brcmf;
	}

	ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
	if (ret < 0) {
		kfree(fwreq);
		goto fail_brcmf;
	}
	return 0;

fail_brcmf:
	brcmf_free(&devinfo->pdev->dev);
fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}

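/* PCI remove: stop interrupts, detach the core driver, free the bus
 * structures, then release rings, scratch buffers, the IRQ and the mapped
 * resources before freeing the remaining device state.
 */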
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);
	brcmf_free(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}


#ifdef CONFIG_PM

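/* Suspend path: bring the bus down, tell the firmware the host is about to
 * enter D3 via the host-to-device mailbox and wait (with timeout) for the
 * acknowledgement before declaring the PCIe state down.
 */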
static int brcmf_pcie_pm_enter_D3(struct device *dev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);

	wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
			   BRCMF_PCIE_MBDATA_TIMEOUT);
	if (!devinfo->mbdata_completed) {
		brcmf_err(bus, "Timeout on response for entering D3 substate\n");
		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
		return -EIO;
	}

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;

	return 0;
}

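/* Resume path: if the device kept its state across suspend (interrupt mask
 * still programmed), just inform the firmware of D0 and re-enable the bus;
 * otherwise fall back to a full remove/probe cycle so the firmware gets
 * reloaded.
 */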
static int brcmf_pcie_pm_leave_D3(struct device *dev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	struct pci_dev *pdev;
	int err;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(dev);
	devinfo = bus->bus_priv.pcie->devinfo;
	brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);

	/* Check if device is still up and running, if so we are ready */
	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
		brcmf_dbg(PCIE, "Try to wakeup device....\n");
		if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
			goto cleanup;
		brcmf_dbg(PCIE, "Hot resume, continue....\n");
		devinfo->state = BRCMFMAC_PCIE_STATE_UP;
		brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
		brcmf_pcie_intr_enable(devinfo);
		brcmf_pcie_hostready(devinfo);
		return 0;
	}

cleanup:
	brcmf_chip_detach(devinfo->ci);
	devinfo->ci = NULL;
	pdev = devinfo->pdev;
	brcmf_pcie_remove(pdev);

	err = brcmf_pcie_probe(pdev, NULL);
	if (err)
		__brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);

	return err;
}


static const struct dev_pm_ops brcmf_pciedrvr_pm = {
	.suspend = brcmf_pcie_pm_enter_D3,
	.resume = brcmf_pcie_pm_leave_D3,
	.freeze = brcmf_pcie_pm_enter_D3,
	.restore = brcmf_pcie_pm_leave_D3,
};


#endif /* CONFIG_PM */


#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev)	{ \
	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }

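/* Device id table. Besides the Broadcom vendor/device ids, each entry also
 * matches on the PCI class (PCI_CLASS_NETWORK_OTHER, mask 0xffff00), which
 * is presumably meant to keep the driver off functions that expose the same
 * ids with a different class.
 */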
static const struct pci_device_id brcmf_pcie_devid_table[] = {
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
	{ /* end: all zeroes */ }
};


MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);


static struct pci_driver brcmf_pciedrvr = {
	.node = {},
	.name = KBUILD_MODNAME,
	.id_table = brcmf_pcie_devid_table,
	.probe = brcmf_pcie_probe,
	.remove = brcmf_pcie_remove,
#ifdef CONFIG_PM
	.driver.pm = &brcmf_pciedrvr_pm,
#endif
	.driver.coredump = brcmf_dev_coredump,
};


int brcmf_pcie_register(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	return pci_register_driver(&brcmf_pciedrvr);
}


void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}