1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/kthread.h>
20 #include <linux/printk.h>
21 #include <linux/pci_ids.h>
22 #include <linux/netdevice.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/mmc/sdio.h>
26 #include <linux/mmc/sdio_func.h>
27 #include <linux/mmc/card.h>
28 #include <linux/semaphore.h>
29 #include <linux/firmware.h>
30 #include <linux/module.h>
31 #include <linux/bcma/bcma.h>
32 #include <linux/debugfs.h>
33 #include <linux/vmalloc.h>
34 #include <linux/platform_data/brcmfmac-sdio.h>
35 #include <asm/unaligned.h>
36 #include <defs.h>
37 #include <brcmu_wifi.h>
38 #include <brcmu_utils.h>
39 #include <brcm_hw_ids.h>
40 #include <soc.h>
41 #include "sdio_host.h"
42 #include "sdio_chip.h"
43
44 #define DCMD_RESP_TIMEOUT 2000 /* In milliseconds */
45
46 #ifdef DEBUG
47
48 #define BRCMF_TRAP_INFO_SIZE 80
49
50 #define CBUF_LEN (128)
51
52 /* Device console log buffer state */
53 #define CONSOLE_BUFFER_MAX 2024
54
55 struct rte_log_le {
56 __le32 buf; /* Can't be pointer on (64-bit) hosts */
57 __le32 buf_size;
58 __le32 idx;
59 char *_buf_compat; /* Redundant pointer for backward compat. */
60 };
61
62 struct rte_console {
63 /* Virtual UART
64 * When there is no UART (e.g. Quickturn),
65 * the host should write a complete
66 * input line directly into cbuf and then write
67 * the length into vcons_in.
68 * This may also be used when there is a real UART
69 * (at risk of conflicting with
70 * the real UART). vcons_out is currently unused.
71 */
72 uint vcons_in;
73 uint vcons_out;
74
75 /* Output (logging) buffer
76 * Console output is written to a ring buffer log_buf at index log_idx.
77 * The host may read the output when it sees log_idx advance.
78 * Output will be lost if the output wraps around faster than the host
79 * polls.
80 */
81 struct rte_log_le log_le;
82
83 /* Console input line buffer
84 * Characters are read one at a time into cbuf
85 * until <CR> is received, then
86 * the buffer is processed as a command line.
87 * Also used for virtual UART.
88 */
89 uint cbuf_idx;
90 char cbuf[CBUF_LEN];
91 };
92
93 #endif /* DEBUG */
94 #include <chipcommon.h>
95
96 #include "dhd_bus.h"
97 #include "dhd_dbg.h"
98 #include "tracepoint.h"
99
100 #define TXQLEN 2048 /* bulk tx queue length */
101 #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
102 #define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
103 #define PRIOMASK 7
104
105 #define TXRETRIES 2 /* # of retries for tx frames */
106
107 #define BRCMF_RXBOUND 50 /* Default for max rx frames in
108 one scheduling */
109
110 #define BRCMF_TXBOUND 20 /* Default for max tx frames in
111 one scheduling */
112
113 #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
114
115 #define MEMBLOCK 2048 /* Block size used for downloading
116 of dongle image */
117 #define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
118 biggest possible glom */
119
120 #define BRCMF_FIRSTREAD (1 << 6)
121
122
123 /* SBSDIO_DEVICE_CTL */
124
125 /* 1: device will assert busy signal when receiving CMD53 */
126 #define SBSDIO_DEVCTL_SETBUSY 0x01
127 /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
128 #define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
129 /* 1: mask all interrupts to host except the chipActive (rev 8) */
130 #define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
131 /* 1: isolate internal sdio signals, put external pads in tri-state; requires
132 * sdio bus power cycle to clear (rev 9) */
133 #define SBSDIO_DEVCTL_PADS_ISO 0x08
134 /* Force SD->SB reset mapping (rev 11) */
135 #define SBSDIO_DEVCTL_SB_RST_CTL 0x30
136 /* Determined by CoreControl bit */
137 #define SBSDIO_DEVCTL_RST_CORECTL 0x00
138 /* Force backplane reset */
139 #define SBSDIO_DEVCTL_RST_BPRESET 0x10
140 /* Force no backplane reset */
141 #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
142
143 /* direct(mapped) cis space */
144
145 /* MAPPED common CIS address */
146 #define SBSDIO_CIS_BASE_COMMON 0x1000
147 /* maximum bytes in one CIS */
148 #define SBSDIO_CIS_SIZE_LIMIT 0x200
149 /* cis offset addr is < 17 bits */
150 #define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
151
152 /* manfid tuple length, include tuple, link bytes */
153 #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
154
155 /* intstatus */
156 #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
157 #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
158 #define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
159 #define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
160 #define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
161 #define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
162 #define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
163 #define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
164 #define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
165 #define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
166 #define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
167 #define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
168 #define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
169 #define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
170 #define I_PC (1 << 10) /* descriptor error */
171 #define I_PD (1 << 11) /* data error */
172 #define I_DE (1 << 12) /* Descriptor protocol Error */
173 #define I_RU (1 << 13) /* Receive descriptor Underflow */
174 #define I_RO (1 << 14) /* Receive fifo Overflow */
175 #define I_XU (1 << 15) /* Transmit fifo Underflow */
176 #define I_RI (1 << 16) /* Receive Interrupt */
177 #define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
178 #define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
179 #define I_XI (1 << 24) /* Transmit Interrupt */
180 #define I_RF_TERM (1 << 25) /* Read Frame Terminate */
181 #define I_WF_TERM (1 << 26) /* Write Frame Terminate */
182 #define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
183 #define I_SBINT (1 << 28) /* sbintstatus Interrupt */
184 #define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
185 #define I_SRESET (1 << 30) /* CCCR RES interrupt */
186 #define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
187 #define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
188 #define I_DMA (I_RI | I_XI | I_ERRORS)
189
190 /* corecontrol */
191 #define CC_CISRDY (1 << 0) /* CIS Ready */
192 #define CC_BPRESEN (1 << 1) /* CCCR RES signal */
193 #define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
194 #define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
195 #define CC_XMTDATAAVAIL_MODE (1 << 4)
196 #define CC_XMTDATAAVAIL_CTRL (1 << 5)
197
198 /* SDA_FRAMECTRL */
199 #define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
200 #define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
201 #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
202 #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
203
204 /* HW frame tag */
205 #define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
206
207 /* Total length of frame header for dongle protocol */
208 #define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
209 #define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
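/*
 * Note: with SDPCM_FRAMETAG_LEN (4) and SDPCM_SWHEADER_LEN (8, defined
 * below) SDPCM_HDRLEN works out to 12 bytes; SDPCM_RESERVE additionally
 * reserves BRCMF_SDALIGN bytes of headroom for start-address alignment.
 */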
210
211 /*
212 * Software allocation of To SB Mailbox resources
213 */
214
215 /* tosbmailbox bits corresponding to intstatus bits */
216 #define SMB_NAK (1 << 0) /* Frame NAK */
217 #define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
218 #define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
219 #define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
220
221 /* tosbmailboxdata */
222 #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
223
224 /*
225 * Software allocation of To Host Mailbox resources
226 */
227
228 /* intstatus bits */
229 #define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
230 #define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
231 #define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
232 #define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
233
234 /* tohostmailboxdata */
235 #define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
236 #define HMB_DATA_DEVREADY 2 /* talk to host after enable */
237 #define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
238 #define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
239
240 #define HMB_DATA_FCDATA_MASK 0xff000000
241 #define HMB_DATA_FCDATA_SHIFT 24
242
243 #define HMB_DATA_VERSION_MASK 0x00ff0000
244 #define HMB_DATA_VERSION_SHIFT 16
245
246 /*
247 * Software-defined protocol header
248 */
249
250 /* Current protocol version */
251 #define SDPCM_PROT_VERSION 4
252
253 /* SW frame header */
254 #define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
255
256 #define SDPCM_CHANNEL_MASK 0x00000f00
257 #define SDPCM_CHANNEL_SHIFT 8
258 #define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
259
260 #define SDPCM_NEXTLEN_OFFSET 2
261
262 /* Data Offset from SOF (HW Tag, SW Tag, Pad) */
263 #define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
264 #define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
265 #define SDPCM_DOFFSET_MASK 0xff000000
266 #define SDPCM_DOFFSET_SHIFT 24
267 #define SDPCM_FCMASK_OFFSET 4 /* Flow control */
268 #define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
269 #define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
270 #define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
271
272 #define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
273
274 /* logical channel numbers */
275 #define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
276 #define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
277 #define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
278 #define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
279 #define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
280
281 #define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
282
283 #define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
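/*
 * Illustrative decode of the software header (not driver code): the accessor
 * macros above are applied to the first byte following the 4-byte hardware
 * frame tag, e.g. assuming 'sw' points at header + SDPCM_FRAMETAG_LEN:
 *
 *	u8 seq  = SDPCM_PACKET_SEQUENCE(sw);	byte 0: rx sequence number
 *	u8 chan = SDPCM_PACKET_CHANNEL(sw);	byte 1, low nibble: channel
 *	u8 doff = SDPCM_DOFFSET_VALUE(sw);	byte 3: data offset
 *	u8 fc   = SDPCM_FCMASK_VALUE(sw);	byte 4: flow control bits
 *	u8 win  = SDPCM_WINDOW_VALUE(sw);	byte 5: tx window (credits)
 *
 * brcmf_sdio_hdparser() below performs this decoding on received frames.
 */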
284
285 /*
286 * Shared structure between dongle and the host.
287 * The structure contains pointers to trap or assert information.
288 */
289 #define SDPCM_SHARED_VERSION 0x0003
290 #define SDPCM_SHARED_VERSION_MASK 0x00FF
291 #define SDPCM_SHARED_ASSERT_BUILT 0x0100
292 #define SDPCM_SHARED_ASSERT 0x0200
293 #define SDPCM_SHARED_TRAP 0x0400
294
295 /* Space for header read, limit for data packets */
296 #define MAX_HDR_READ (1 << 6)
297 #define MAX_RX_DATASZ 2048
298
299 /* Maximum milliseconds to wait for F2 to come up */
300 #define BRCMF_WAIT_F2RDY 3000
301
302 /* Bump up limit on waiting for HT to account for first startup;
303 * if the image is doing a CRC calculation before programming the PMU
304 * for HT availability, it could take a couple hundred ms more, so
305 * max out at 1 second (1000000us).
306 */
307 #undef PMU_MAX_TRANSITION_DLY
308 #define PMU_MAX_TRANSITION_DLY 1000000
309
310 /* Value for ChipClockCSR during initial setup */
311 #define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
312 SBSDIO_ALP_AVAIL_REQ)
313
314 /* Flags for SDH calls */
315 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
316
317 #define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
318 #define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
319 MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
320 MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
321
322 #define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
323 #define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
324 * when idle
325 */
326 #define BRCMF_IDLE_INTERVAL 1
327
328 #define KSO_WAIT_US 50
329 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
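/*
 * With KSO_WAIT_US = 50 and PMU_MAX_TRANSITION_DLY = 1000000 this allows up
 * to 20000 read-back attempts, i.e. brcmf_sdbrcm_kso_control() below polls
 * the sleep CSR for at most roughly one second.
 */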
330
331 /*
332 * Conversion of 802.1D priority to precedence level
333 */
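/*
 * The XOR with 2 below only touches BE and NONE: assuming PRIO_8021D_BE == 0
 * and PRIO_8021D_NONE == 2 (brcmu_wifi.h), it swaps precedence levels 0 and 2
 * so best-effort traffic sorts above background (1), matching 802.1D ordering;
 * all other priorities map to themselves.
 */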
334 static uint prio2prec(u32 prio)
335 {
336 return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
337 (prio^2) : prio;
338 }
339
340 #ifdef DEBUG
341 /* Device console log buffer state */
342 struct brcmf_console {
343 uint count; /* Poll interval msec counter */
344 uint log_addr; /* Log struct address (fixed) */
345 struct rte_log_le log_le; /* Log struct (host copy) */
346 uint bufsize; /* Size of log buffer */
347 u8 *buf; /* Log buffer (host copy) */
348 uint last; /* Last buffer read index */
349 };
350
351 struct brcmf_trap_info {
352 __le32 type;
353 __le32 epc;
354 __le32 cpsr;
355 __le32 spsr;
356 __le32 r0; /* a1 */
357 __le32 r1; /* a2 */
358 __le32 r2; /* a3 */
359 __le32 r3; /* a4 */
360 __le32 r4; /* v1 */
361 __le32 r5; /* v2 */
362 __le32 r6; /* v3 */
363 __le32 r7; /* v4 */
364 __le32 r8; /* v5 */
365 __le32 r9; /* sb/v6 */
366 __le32 r10; /* sl/v7 */
367 __le32 r11; /* fp/v8 */
368 __le32 r12; /* ip */
369 __le32 r13; /* sp */
370 __le32 r14; /* lr */
371 __le32 pc; /* r15 */
372 };
373 #endif /* DEBUG */
374
375 struct sdpcm_shared {
376 u32 flags;
377 u32 trap_addr;
378 u32 assert_exp_addr;
379 u32 assert_file_addr;
380 u32 assert_line;
381 u32 console_addr; /* Address of struct rte_console */
382 u32 msgtrace_addr;
383 u8 tag[32];
384 u32 brpt_addr;
385 };
386
387 struct sdpcm_shared_le {
388 __le32 flags;
389 __le32 trap_addr;
390 __le32 assert_exp_addr;
391 __le32 assert_file_addr;
392 __le32 assert_line;
393 __le32 console_addr; /* Address of struct rte_console */
394 __le32 msgtrace_addr;
395 u8 tag[32];
396 __le32 brpt_addr;
397 };
398
399 /* SDIO read frame info */
400 struct brcmf_sdio_read {
401 u8 seq_num;
402 u8 channel;
403 u16 len;
404 u16 len_left;
405 u16 len_nxtfrm;
406 u8 dat_offset;
407 };
408
409 /* misc chip info needed by some of the routines */
410 /* Private data for SDIO bus interaction */
411 struct brcmf_sdio {
412 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
413 struct chip_info *ci; /* Chip info struct */
414 char *vars; /* Variables (from CIS and/or other) */
415 uint varsz; /* Size of variables buffer */
416
417 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
418
419 u32 hostintmask; /* Copy of Host Interrupt Mask */
420 atomic_t intstatus; /* Intstatus bits (events) pending */
421 atomic_t fcstate; /* State of dongle flow-control */
422
423 uint blocksize; /* Block size of SDIO transfers */
424 uint roundup; /* Max roundup limit */
425
426 struct pktq txq; /* Queue length used for flow-control */
427 u8 flowcontrol; /* per prio flow control bitmask */
428 u8 tx_seq; /* Transmit sequence number (next) */
429 u8 tx_max; /* Maximum transmit sequence allowed */
430
431 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
432 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
433 u8 rx_seq; /* Receive sequence number (expected) */
434 struct brcmf_sdio_read cur_read;
435 /* info of current read frame */
436 bool rxskip; /* Skip receive (awaiting NAK ACK) */
437 bool rxpending; /* Data frame pending in dongle */
438
439 uint rxbound; /* Rx frames to read before resched */
440 uint txbound; /* Tx frames to send before resched */
441 uint txminmax;
442
443 struct sk_buff *glomd; /* Packet containing glomming descriptor */
444 struct sk_buff_head glom; /* Packet list for glommed superframe */
445 uint glomerr; /* Glom packet read errors */
446
447 u8 *rxbuf; /* Buffer for receiving control packets */
448 uint rxblen; /* Allocated length of rxbuf */
449 u8 *rxctl; /* Aligned pointer into rxbuf */
450 u8 *rxctl_orig; /* pointer for freeing rxctl */
451 u8 *databuf; /* Buffer for receiving big glom packet */
452 u8 *dataptr; /* Aligned pointer into databuf */
453 uint rxlen; /* Length of valid data in buffer */
454 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
455
456 u8 sdpcm_ver; /* Bus protocol reported by dongle */
457
458 bool intr; /* Use interrupts */
459 bool poll; /* Use polling */
460 atomic_t ipend; /* Device interrupt is pending */
461 uint spurious; /* Count of spurious interrupts */
462 uint pollrate; /* Ticks between device polls */
463 uint polltick; /* Tick counter */
464
465 #ifdef DEBUG
466 uint console_interval;
467 struct brcmf_console console; /* Console output polling support */
468 uint console_addr; /* Console address from shared struct */
469 #endif /* DEBUG */
470
471 uint clkstate; /* State of sd and backplane clock(s) */
472 bool activity; /* Activity flag for clock down */
473 s32 idletime; /* Control for activity timeout */
474 s32 idlecount; /* Activity timeout counter */
475 s32 idleclock; /* How to set bus driver when idle */
476 s32 sd_rxchain;
477 bool use_rxchain; /* If brcmf should use PKT chains */
478 bool rxflow_mode; /* Rx flow control mode */
479 bool rxflow; /* Is rx flow control on */
480 bool alp_only; /* Don't use HT clock (ALP only) */
481
482 u8 *ctrl_frame_buf;
483 u32 ctrl_frame_len;
484 bool ctrl_frame_stat;
485
486 spinlock_t txqlock;
487 wait_queue_head_t ctrl_wait;
488 wait_queue_head_t dcmd_resp_wait;
489
490 struct timer_list timer;
491 struct completion watchdog_wait;
492 struct task_struct *watchdog_tsk;
493 bool wd_timer_valid;
494 uint save_ms;
495
496 struct workqueue_struct *brcmf_wq;
497 struct work_struct datawork;
498 struct list_head dpc_tsklst;
499 spinlock_t dpc_tl_lock;
500
501 const struct firmware *firmware;
502 u32 fw_ptr;
503
504 bool txoff; /* Transmit flow-controlled */
505 struct brcmf_sdio_count sdcnt;
506 bool sr_enabled; /* SaveRestore enabled */
507 bool sleeping; /* SDIO bus sleeping */
508 };
509
510 /* clkstate */
511 #define CLK_NONE 0
512 #define CLK_SDONLY 1
513 #define CLK_PENDING 2
514 #define CLK_AVAIL 3
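/*
 * Clock bring-up normally proceeds CLK_NONE -> CLK_SDONLY -> CLK_AVAIL, with
 * CLK_PENDING as an intermediate state while waiting for the clock-available
 * interrupt; brcmf_sdbrcm_clkctl() below drives the transitions.
 */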
515
516 #ifdef DEBUG
517 static int qcount[NUMPRIO];
518 static int tx_packets[NUMPRIO];
519 #endif /* DEBUG */
520
521 #define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
522
523 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
524
525 /* Retry count for register access failures */
526 static const uint retry_limit = 2;
527
528 /* Limit on rounding up frames */
529 static const uint max_roundup = 512;
530
531 #define ALIGNMENT 4
532
533 enum brcmf_sdio_frmtype {
534 BRCMF_SDIO_FT_NORMAL,
535 BRCMF_SDIO_FT_SUPER,
536 BRCMF_SDIO_FT_SUB,
537 };
538
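/*
 * Align the start of packet data to 'align' bytes by pulling the skb, then
 * trim it to 'len'. Example with illustrative numbers: if p->data ends in
 * ...0x02 and align == 4, datalign becomes 2 and two bytes are pulled to
 * reach a 4-byte boundary.
 */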
539 static void pkt_align(struct sk_buff *p, int len, int align)
540 {
541 uint datalign;
542 datalign = (unsigned long)(p->data);
543 datalign = roundup(datalign, (align)) - datalign;
544 if (datalign)
545 skb_pull(p, datalign);
546 __skb_trim(p, len);
547 }
548
549 /* Check whether there is a tx window offered */
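/*
 * tx_max and tx_seq are modulo-256 sequence numbers, so the unsigned 8-bit
 * difference is the number of tx credits left; the 0x80 test rejects a window
 * that looks negative (> 127). E.g. tx_seq = 0xfe and tx_max = 0x02 give
 * (u8)(0x02 - 0xfe) = 4 usable credits.
 */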
550 static bool data_ok(struct brcmf_sdio *bus)
551 {
552 return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
553 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
554 }
555
556 /*
557 * Reads a register in the SDIO hardware block. This block occupies a series of
558 * addresses on the 32-bit backplane bus.
559 */
560 static int
561 r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
562 {
563 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
564 int ret;
565
566 *regvar = brcmf_sdio_regrl(bus->sdiodev,
567 bus->ci->c_inf[idx].base + offset, &ret);
568
569 return ret;
570 }
571
572 static int
573 w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
574 {
575 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
576 int ret;
577
578 brcmf_sdio_regwl(bus->sdiodev,
579 bus->ci->c_inf[idx].base + reg_offset,
580 regval, &ret);
581
582 return ret;
583 }
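/*
 * Illustrative usage of the two helpers above (mirrors
 * brcmf_sdbrcm_hostmail() below): read the to-host mailbox data and then
 * acknowledge the interrupt.
 *
 *	u32 hmb_data;
 *	int ret = r_sdreg32(bus, &hmb_data,
 *			    offsetof(struct sdpcmd_regs, tohostmailboxdata));
 *	if (ret == 0)
 *		w_sdreg32(bus, SMB_INT_ACK,
 *			  offsetof(struct sdpcmd_regs, tosbmailbox));
 */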
584
585 static int
586 brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
587 {
588 u8 wr_val = 0, rd_val, cmp_val, bmask;
589 int err = 0;
590 int try_cnt = 0;
591
592 brcmf_dbg(TRACE, "Enter\n");
593
594 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
595 /* 1st KSO write goes to AOS wake up core if device is asleep */
596 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
597 wr_val, &err);
598 if (err) {
599 brcmf_err("SDIO_AOS KSO write error: %d\n", err);
600 return err;
601 }
602
603 if (on) {
604 /* device WAKEUP through KSO:
605 * write bit 0 & read back until
606 * both bits 0 (kso bit) & 1 (dev on status) are set
607 */
608 cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
609 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
610 bmask = cmp_val;
611 usleep_range(2000, 3000);
612 } else {
613 /* Put device to sleep, turn off KSO */
614 cmp_val = 0;
615 /* only check for bit0, bit1(dev on status) may not
616 * get cleared right away
617 */
618 bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
619 }
620
621 do {
622 /* reliable KSO bit set/clr:
623 * the sdiod sleep write access is synced to PMU 32khz clk
624 * just one write attempt may fail,
625 * read it back until it matches written value
626 */
627 rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
628 &err);
629 if (((rd_val & bmask) == cmp_val) && !err)
630 break;
631 brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
632 try_cnt, MAX_KSO_ATTEMPTS, err);
633 udelay(KSO_WAIT_US);
634 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
635 wr_val, &err);
636 } while (try_cnt++ < MAX_KSO_ATTEMPTS);
637
638 return err;
639 }
640
641 #define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
642
643 #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
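/* HOSTINTMASK enables the four to-host mailbox S/W interrupts (I_HMB_SW_MASK)
 * plus the doze-to-active notification (I_CHIPACTIVE).
 */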
644
645 /* Turn backplane clock on or off */
646 static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
647 {
648 int err;
649 u8 clkctl, clkreq, devctl;
650 unsigned long timeout;
651
652 brcmf_dbg(SDIO, "Enter\n");
653
654 clkctl = 0;
655
656 if (bus->sr_enabled) {
657 bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
658 return 0;
659 }
660
661 if (on) {
662 /* Request HT Avail */
663 clkreq =
664 bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
665
666 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
667 clkreq, &err);
668 if (err) {
669 brcmf_err("HT Avail request error: %d\n", err);
670 return -EBADE;
671 }
672
673 /* Check current status */
674 clkctl = brcmf_sdio_regrb(bus->sdiodev,
675 SBSDIO_FUNC1_CHIPCLKCSR, &err);
676 if (err) {
677 brcmf_err("HT Avail read error: %d\n", err);
678 return -EBADE;
679 }
680
681 /* Go to pending and await interrupt if appropriate */
682 if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
683 /* Allow only clock-available interrupt */
684 devctl = brcmf_sdio_regrb(bus->sdiodev,
685 SBSDIO_DEVICE_CTL, &err);
686 if (err) {
687 brcmf_err("Devctl error setting CA: %d\n",
688 err);
689 return -EBADE;
690 }
691
692 devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
693 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
694 devctl, &err);
695 brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
696 bus->clkstate = CLK_PENDING;
697
698 return 0;
699 } else if (bus->clkstate == CLK_PENDING) {
700 /* Cancel CA-only interrupt filter */
701 devctl = brcmf_sdio_regrb(bus->sdiodev,
702 SBSDIO_DEVICE_CTL, &err);
703 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
704 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
705 devctl, &err);
706 }
707
708 /* Otherwise, wait here (polling) for HT Avail */
709 timeout = jiffies +
710 msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
711 while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
712 clkctl = brcmf_sdio_regrb(bus->sdiodev,
713 SBSDIO_FUNC1_CHIPCLKCSR,
714 &err);
715 if (time_after(jiffies, timeout))
716 break;
717 else
718 usleep_range(5000, 10000);
719 }
720 if (err) {
721 brcmf_err("HT Avail request error: %d\n", err);
722 return -EBADE;
723 }
724 if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
725 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
726 PMU_MAX_TRANSITION_DLY, clkctl);
727 return -EBADE;
728 }
729
730 /* Mark clock available */
731 bus->clkstate = CLK_AVAIL;
732 brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
733
734 #if defined(DEBUG)
735 if (!bus->alp_only) {
736 if (SBSDIO_ALPONLY(clkctl))
737 brcmf_err("HT Clock should be on\n");
738 }
739 #endif /* defined (DEBUG) */
740
741 bus->activity = true;
742 } else {
743 clkreq = 0;
744
745 if (bus->clkstate == CLK_PENDING) {
746 /* Cancel CA-only interrupt filter */
747 devctl = brcmf_sdio_regrb(bus->sdiodev,
748 SBSDIO_DEVICE_CTL, &err);
749 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
750 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
751 devctl, &err);
752 }
753
754 bus->clkstate = CLK_SDONLY;
755 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
756 clkreq, &err);
757 brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
758 if (err) {
759 brcmf_err("Failed access turning clock off: %d\n",
760 err);
761 return -EBADE;
762 }
763 }
764 return 0;
765 }
766
767 /* Change idle/active SD state */
768 static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
769 {
770 brcmf_dbg(SDIO, "Enter\n");
771
772 if (on)
773 bus->clkstate = CLK_SDONLY;
774 else
775 bus->clkstate = CLK_NONE;
776
777 return 0;
778 }
779
780 /* Transition SD and backplane clock readiness */
781 static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
782 {
783 #ifdef DEBUG
784 uint oldstate = bus->clkstate;
785 #endif /* DEBUG */
786
787 brcmf_dbg(SDIO, "Enter\n");
788
789 /* Early exit if we're already there */
790 if (bus->clkstate == target) {
791 if (target == CLK_AVAIL) {
792 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
793 bus->activity = true;
794 }
795 return 0;
796 }
797
798 switch (target) {
799 case CLK_AVAIL:
800 /* Make sure SD clock is available */
801 if (bus->clkstate == CLK_NONE)
802 brcmf_sdbrcm_sdclk(bus, true);
803 /* Now request HT Avail on the backplane */
804 brcmf_sdbrcm_htclk(bus, true, pendok);
805 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
806 bus->activity = true;
807 break;
808
809 case CLK_SDONLY:
810 /* Remove HT request, or bring up SD clock */
811 if (bus->clkstate == CLK_NONE)
812 brcmf_sdbrcm_sdclk(bus, true);
813 else if (bus->clkstate == CLK_AVAIL)
814 brcmf_sdbrcm_htclk(bus, false, false);
815 else
816 brcmf_err("request for %d -> %d\n",
817 bus->clkstate, target);
818 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
819 break;
820
821 case CLK_NONE:
822 /* Make sure to remove HT request */
823 if (bus->clkstate == CLK_AVAIL)
824 brcmf_sdbrcm_htclk(bus, false, false);
825 /* Now remove the SD clock */
826 brcmf_sdbrcm_sdclk(bus, false);
827 brcmf_sdbrcm_wd_timer(bus, 0);
828 break;
829 }
830 #ifdef DEBUG
831 brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
832 #endif /* DEBUG */
833
834 return 0;
835 }
836
837 static int
838 brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
839 {
840 int err = 0;
841 brcmf_dbg(TRACE, "Enter\n");
842 brcmf_dbg(SDIO, "request %s currently %s\n",
843 (sleep ? "SLEEP" : "WAKE"),
844 (bus->sleeping ? "SLEEP" : "WAKE"));
845
846 /* If SR is enabled control bus state with KSO */
847 if (bus->sr_enabled) {
848 /* Done if we're already in the requested state */
849 if (sleep == bus->sleeping)
850 goto end;
851
852 /* Going to sleep */
853 if (sleep) {
854 /* Don't sleep if something is pending */
855 if (atomic_read(&bus->intstatus) ||
856 atomic_read(&bus->ipend) > 0 ||
857 (!atomic_read(&bus->fcstate) &&
858 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
859 data_ok(bus)))
860 return -EBUSY;
861 err = brcmf_sdbrcm_kso_control(bus, false);
862 /* disable watchdog */
863 if (!err)
864 brcmf_sdbrcm_wd_timer(bus, 0);
865 } else {
866 bus->idlecount = 0;
867 err = brcmf_sdbrcm_kso_control(bus, true);
868 }
869 if (!err) {
870 /* Change state */
871 bus->sleeping = sleep;
872 brcmf_dbg(SDIO, "new state %s\n",
873 (sleep ? "SLEEP" : "WAKE"));
874 } else {
875 brcmf_err("error while changing bus sleep state %d\n",
876 err);
877 return err;
878 }
879 }
880
881 end:
882 /* control clocks */
883 if (sleep) {
884 if (!bus->sr_enabled)
885 brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
886 } else {
887 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
888 }
889
890 return err;
891
892 }
893
894 static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
895 {
896 u32 intstatus = 0;
897 u32 hmb_data;
898 u8 fcbits;
899 int ret;
900
901 brcmf_dbg(SDIO, "Enter\n");
902
903 /* Read mailbox data and ack that we did so */
904 ret = r_sdreg32(bus, &hmb_data,
905 offsetof(struct sdpcmd_regs, tohostmailboxdata));
906
907 if (ret == 0)
908 w_sdreg32(bus, SMB_INT_ACK,
909 offsetof(struct sdpcmd_regs, tosbmailbox));
910 bus->sdcnt.f1regdata += 2;
911
912 /* Dongle recomposed rx frames, accept them again */
913 if (hmb_data & HMB_DATA_NAKHANDLED) {
914 brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
915 bus->rx_seq);
916 if (!bus->rxskip)
917 brcmf_err("unexpected NAKHANDLED!\n");
918
919 bus->rxskip = false;
920 intstatus |= I_HMB_FRAME_IND;
921 }
922
923 /*
924 * DEVREADY does not occur with gSPI.
925 */
926 if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
927 bus->sdpcm_ver =
928 (hmb_data & HMB_DATA_VERSION_MASK) >>
929 HMB_DATA_VERSION_SHIFT;
930 if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
931 brcmf_err("Version mismatch, dongle reports %d, "
932 "expecting %d\n",
933 bus->sdpcm_ver, SDPCM_PROT_VERSION);
934 else
935 brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
936 bus->sdpcm_ver);
937 }
938
939 /*
940 * Flow control has been moved into the RX headers and this out-of-band
941 * method isn't used any more. It is handled here only to remain
942 * backward compatible with older dongles.
943 */
944 if (hmb_data & HMB_DATA_FC) {
945 fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
946 HMB_DATA_FCDATA_SHIFT;
947
948 if (fcbits & ~bus->flowcontrol)
949 bus->sdcnt.fc_xoff++;
950
951 if (bus->flowcontrol & ~fcbits)
952 bus->sdcnt.fc_xon++;
953
954 bus->sdcnt.fc_rcvd++;
955 bus->flowcontrol = fcbits;
956 }
957
958 /* Shouldn't be any others */
959 if (hmb_data & ~(HMB_DATA_DEVREADY |
960 HMB_DATA_NAKHANDLED |
961 HMB_DATA_FC |
962 HMB_DATA_FWREADY |
963 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
964 brcmf_err("Unknown mailbox data content: 0x%02x\n",
965 hmb_data);
966
967 return intstatus;
968 }
969
970 static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
971 {
972 uint retries = 0;
973 u16 lastrbc;
974 u8 hi, lo;
975 int err;
976
977 brcmf_err("%sterminate frame%s\n",
978 abort ? "abort command, " : "",
979 rtx ? ", send NAK" : "");
980
981 if (abort)
982 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
983
984 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
985 SFC_RF_TERM, &err);
986 bus->sdcnt.f1regdata++;
987
988 /* Wait until the packet has been flushed (device/FIFO stable) */
989 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
990 hi = brcmf_sdio_regrb(bus->sdiodev,
991 SBSDIO_FUNC1_RFRAMEBCHI, &err);
992 lo = brcmf_sdio_regrb(bus->sdiodev,
993 SBSDIO_FUNC1_RFRAMEBCLO, &err);
994 bus->sdcnt.f1regdata += 2;
995
996 if ((hi == 0) && (lo == 0))
997 break;
998
999 if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
1000 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1001 lastrbc, (hi << 8) + lo);
1002 }
1003 lastrbc = (hi << 8) + lo;
1004 }
1005
1006 if (!retries)
1007 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1008 else
1009 brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1010
1011 if (rtx) {
1012 bus->sdcnt.rxrtx++;
1013 err = w_sdreg32(bus, SMB_NAK,
1014 offsetof(struct sdpcmd_regs, tosbmailbox));
1015
1016 bus->sdcnt.f1regdata++;
1017 if (err == 0)
1018 bus->rxskip = true;
1019 }
1020
1021 /* Clear partial in any case */
1022 bus->cur_read.len = 0;
1023
1024 /* If we can't reach the device, signal failure */
1025 if (err)
1026 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
1027 }
1028
1029 /* copy a buffer into a pkt buffer chain */
1030 static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
1031 {
1032 uint n, ret = 0;
1033 struct sk_buff *p;
1034 u8 *buf;
1035
1036 buf = bus->dataptr;
1037
1038 /* copy the data */
1039 skb_queue_walk(&bus->glom, p) {
1040 n = min_t(uint, p->len, len);
1041 memcpy(p->data, buf, n);
1042 buf += n;
1043 len -= n;
1044 ret += n;
1045 if (!len)
1046 break;
1047 }
1048
1049 return ret;
1050 }
1051
1052 /* return total length of buffer chain */
1053 static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
1054 {
1055 struct sk_buff *p;
1056 uint total;
1057
1058 total = 0;
1059 skb_queue_walk(&bus->glom, p)
1060 total += p->len;
1061 return total;
1062 }
1063
1064 static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1065 {
1066 struct sk_buff *cur, *next;
1067
1068 skb_queue_walk_safe(&bus->glom, cur, next) {
1069 skb_unlink(cur, &bus->glom);
1070 brcmu_pkt_buf_free_skb(cur);
1071 }
1072 }
1073
1074 static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1075 struct brcmf_sdio_read *rd,
1076 enum brcmf_sdio_frmtype type)
1077 {
1078 u16 len, checksum;
1079 u8 rx_seq, fc, tx_seq_max;
1080
1081 /*
1082 * 4 bytes hardware header (frame tag)
1083 * Byte 0~1: Frame length
1084 * Byte 2~3: Checksum, bit-wise inverse of frame length
1085 */
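/*
 * Worked example (illustrative): a 0x0208-byte frame carries len = 0x0208
 * and checksum = 0xfdf7 (~0x0208), so len ^ checksum equals 0xffff and the
 * checksum test below passes.
 */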
1086 len = get_unaligned_le16(header);
1087 checksum = get_unaligned_le16(header + sizeof(u16));
1088 /* All zero means no more to read */
1089 if (!(len | checksum)) {
1090 bus->rxpending = false;
1091 return -ENODATA;
1092 }
1093 if ((u16)(~(len ^ checksum))) {
1094 brcmf_err("HW header checksum error\n");
1095 bus->sdcnt.rx_badhdr++;
1096 brcmf_sdbrcm_rxfail(bus, false, false);
1097 return -EIO;
1098 }
1099 if (len < SDPCM_HDRLEN) {
1100 brcmf_err("HW header length error\n");
1101 return -EPROTO;
1102 }
1103 if (type == BRCMF_SDIO_FT_SUPER &&
1104 (roundup(len, bus->blocksize) != rd->len)) {
1105 brcmf_err("HW superframe header length error\n");
1106 return -EPROTO;
1107 }
1108 if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1109 brcmf_err("HW subframe header length error\n");
1110 return -EPROTO;
1111 }
1112 rd->len = len;
1113
1114 /*
1115 * 8 bytes software header
1116 * Byte 0: Rx sequence number
1117 * Byte 1: 4 LSB channel number, 4 MSB arbitrary flags
1118 * Byte 2: Length of next data frame
1119 * Byte 3: Data offset
1120 * Byte 4: Flow control bits
1121 * Byte 5: Maximum sequence number allowed for Tx
1122 * Byte 6~7: Reserved
1123 */
1124 if (type == BRCMF_SDIO_FT_SUPER &&
1125 SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
1126 brcmf_err("Glom descriptor found in superframe head\n");
1127 rd->len = 0;
1128 return -EINVAL;
1129 }
1130 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
1131 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
1132 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1133 type != BRCMF_SDIO_FT_SUPER) {
1134 brcmf_err("HW header length too long\n");
1135 bus->sdcnt.rx_toolong++;
1136 brcmf_sdbrcm_rxfail(bus, false, false);
1137 rd->len = 0;
1138 return -EPROTO;
1139 }
1140 if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1141 brcmf_err("Wrong channel for superframe\n");
1142 rd->len = 0;
1143 return -EINVAL;
1144 }
1145 if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1146 rd->channel != SDPCM_EVENT_CHANNEL) {
1147 brcmf_err("Wrong channel for subframe\n");
1148 rd->len = 0;
1149 return -EINVAL;
1150 }
1151 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1152 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1153 brcmf_err("seq %d: bad data offset\n", rx_seq);
1154 bus->sdcnt.rx_badhdr++;
1155 brcmf_sdbrcm_rxfail(bus, false, false);
1156 rd->len = 0;
1157 return -ENXIO;
1158 }
1159 if (rd->seq_num != rx_seq) {
1160 brcmf_err("seq %d: sequence number error, expect %d\n",
1161 rx_seq, rd->seq_num);
1162 bus->sdcnt.rx_badseq++;
1163 rd->seq_num = rx_seq;
1164 }
1165 /* no need to check the rest for subframes */
1166 if (type == BRCMF_SDIO_FT_SUB)
1167 return 0;
1168 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1169 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1170 /* only warn for non-glom packets */
1171 if (rd->channel != SDPCM_GLOM_CHANNEL)
1172 brcmf_err("seq %d: next length error\n", rx_seq);
1173 rd->len_nxtfrm = 0;
1174 }
1175 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1176 if (bus->flowcontrol != fc) {
1177 if (~bus->flowcontrol & fc)
1178 bus->sdcnt.fc_xoff++;
1179 if (bus->flowcontrol & ~fc)
1180 bus->sdcnt.fc_xon++;
1181 bus->sdcnt.fc_rcvd++;
1182 bus->flowcontrol = fc;
1183 }
1184 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1185 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1186 brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1187 tx_seq_max = bus->tx_seq + 2;
1188 }
1189 bus->tx_max = tx_seq_max;
1190
1191 return 0;
1192 }
1193
1194 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1195 {
1196 u16 dlen, totlen;
1197 u8 *dptr, num = 0;
1198
1199 u16 sublen;
1200 struct sk_buff *pfirst, *pnext;
1201
1202 int errcode;
1203 u8 doff, sfdoff;
1204
1205 bool usechain = bus->use_rxchain;
1206
1207 struct brcmf_sdio_read rd_new;
1208
1209 /* If packets, issue read(s) and send up packet chain */
1210 /* Return sequence numbers consumed? */
1211
1212 brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1213 bus->glomd, skb_peek(&bus->glom));
1214
1215 /* If there's a descriptor, generate the packet chain */
1216 if (bus->glomd) {
1217 pfirst = pnext = NULL;
1218 dlen = (u16) (bus->glomd->len);
1219 dptr = bus->glomd->data;
1220 if (!dlen || (dlen & 1)) {
1221 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1222 dlen);
1223 dlen = 0;
1224 }
1225
1226 for (totlen = num = 0; dlen; num++) {
1227 /* Get (and move past) next length */
1228 sublen = get_unaligned_le16(dptr);
1229 dlen -= sizeof(u16);
1230 dptr += sizeof(u16);
1231 if ((sublen < SDPCM_HDRLEN) ||
1232 ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1233 brcmf_err("descriptor len %d bad: %d\n",
1234 num, sublen);
1235 pnext = NULL;
1236 break;
1237 }
1238 if (sublen % BRCMF_SDALIGN) {
1239 brcmf_err("sublen %d not multiple of %d\n",
1240 sublen, BRCMF_SDALIGN);
1241 usechain = false;
1242 }
1243 totlen += sublen;
1244
1245 /* For last frame, adjust read len so total
1246 is a block multiple */
1247 if (!dlen) {
1248 sublen +=
1249 (roundup(totlen, bus->blocksize) - totlen);
1250 totlen = roundup(totlen, bus->blocksize);
1251 }
1252
1253 /* Allocate/chain packet for next subframe */
1254 pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
1255 if (pnext == NULL) {
1256 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1257 num, sublen);
1258 break;
1259 }
1260 skb_queue_tail(&bus->glom, pnext);
1261
1262 /* Adhere to start alignment requirements */
1263 pkt_align(pnext, sublen, BRCMF_SDALIGN);
1264 }
1265
1266 /* If all allocations succeeded, save packet chain
1267 in bus structure */
1268 if (pnext) {
1269 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1270 totlen, num);
1271 if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1272 totlen != bus->cur_read.len) {
1273 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1274 bus->cur_read.len, totlen, rxseq);
1275 }
1276 pfirst = pnext = NULL;
1277 } else {
1278 brcmf_sdbrcm_free_glom(bus);
1279 num = 0;
1280 }
1281
1282 /* Done with descriptor packet */
1283 brcmu_pkt_buf_free_skb(bus->glomd);
1284 bus->glomd = NULL;
1285 bus->cur_read.len = 0;
1286 }
1287
1288 /* Ok -- either we just generated a packet chain,
1289 or had one from before */
1290 if (!skb_queue_empty(&bus->glom)) {
1291 if (BRCMF_GLOM_ON()) {
1292 brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1293 skb_queue_walk(&bus->glom, pnext) {
1294 brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
1295 pnext, (u8 *) (pnext->data),
1296 pnext->len, pnext->len);
1297 }
1298 }
1299
1300 pfirst = skb_peek(&bus->glom);
1301 dlen = (u16) brcmf_sdbrcm_glom_len(bus);
1302
1303 /* Do an SDIO read for the superframe. Configurable iovar to
1304 * read directly into the chained packet, or allocate a large
1305 * packet and copy into the chain.
1306 */
1307 sdio_claim_host(bus->sdiodev->func[1]);
1308 if (usechain) {
1309 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1310 bus->sdiodev->sbwad,
1311 SDIO_FUNC_2, F2SYNC, &bus->glom);
1312 } else if (bus->dataptr) {
1313 errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
1314 bus->sdiodev->sbwad,
1315 SDIO_FUNC_2, F2SYNC,
1316 bus->dataptr, dlen);
1317 sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
1318 if (sublen != dlen) {
1319 brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
1320 dlen, sublen);
1321 errcode = -1;
1322 }
1323 pnext = NULL;
1324 } else {
1325 brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
1326 dlen);
1327 errcode = -1;
1328 }
1329 sdio_release_host(bus->sdiodev->func[1]);
1330 bus->sdcnt.f2rxdata++;
1331
1332 /* On failure, kill the superframe, allow a couple retries */
1333 if (errcode < 0) {
1334 brcmf_err("glom read of %d bytes failed: %d\n",
1335 dlen, errcode);
1336
1337 sdio_claim_host(bus->sdiodev->func[1]);
1338 if (bus->glomerr++ < 3) {
1339 brcmf_sdbrcm_rxfail(bus, true, true);
1340 } else {
1341 bus->glomerr = 0;
1342 brcmf_sdbrcm_rxfail(bus, true, false);
1343 bus->sdcnt.rxglomfail++;
1344 brcmf_sdbrcm_free_glom(bus);
1345 }
1346 sdio_release_host(bus->sdiodev->func[1]);
1347 return 0;
1348 }
1349
1350 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1351 pfirst->data, min_t(int, pfirst->len, 48),
1352 "SUPERFRAME:\n");
1353
1354 rd_new.seq_num = rxseq;
1355 rd_new.len = dlen;
1356 sdio_claim_host(bus->sdiodev->func[1]);
1357 errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
1358 BRCMF_SDIO_FT_SUPER);
1359 sdio_release_host(bus->sdiodev->func[1]);
1360 bus->cur_read.len = rd_new.len_nxtfrm << 4;
1361
1362 /* Remove superframe header, remember offset */
1363 skb_pull(pfirst, rd_new.dat_offset);
1364 sfdoff = rd_new.dat_offset;
1365 num = 0;
1366
1367 /* Validate all the subframe headers */
1368 skb_queue_walk(&bus->glom, pnext) {
1369 /* leave when invalid subframe is found */
1370 if (errcode)
1371 break;
1372
1373 rd_new.len = pnext->len;
1374 rd_new.seq_num = rxseq++;
1375 sdio_claim_host(bus->sdiodev->func[1]);
1376 errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
1377 BRCMF_SDIO_FT_SUB);
1378 sdio_release_host(bus->sdiodev->func[1]);
1379 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1380 pnext->data, 32, "subframe:\n");
1381
1382 num++;
1383 }
1384
1385 if (errcode) {
1386 /* Terminate frame on error, request
1387 a couple retries */
1388 sdio_claim_host(bus->sdiodev->func[1]);
1389 if (bus->glomerr++ < 3) {
1390 /* Restore superframe header space */
1391 skb_push(pfirst, sfdoff);
1392 brcmf_sdbrcm_rxfail(bus, true, true);
1393 } else {
1394 bus->glomerr = 0;
1395 brcmf_sdbrcm_rxfail(bus, true, false);
1396 bus->sdcnt.rxglomfail++;
1397 brcmf_sdbrcm_free_glom(bus);
1398 }
1399 sdio_release_host(bus->sdiodev->func[1]);
1400 bus->cur_read.len = 0;
1401 return 0;
1402 }
1403
1404 /* Basic SD framing looks ok - process each packet (header) */
1405
1406 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1407 dptr = (u8 *) (pfirst->data);
1408 sublen = get_unaligned_le16(dptr);
1409 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1410
1411 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1412 dptr, pfirst->len,
1413 "Rx Subframe Data:\n");
1414
1415 __skb_trim(pfirst, sublen);
1416 skb_pull(pfirst, doff);
1417
1418 if (pfirst->len == 0) {
1419 skb_unlink(pfirst, &bus->glom);
1420 brcmu_pkt_buf_free_skb(pfirst);
1421 continue;
1422 }
1423
1424 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1425 pfirst->data,
1426 min_t(int, pfirst->len, 32),
1427 "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1428 bus->glom.qlen, pfirst, pfirst->data,
1429 pfirst->len, pfirst->next,
1430 pfirst->prev);
1431 }
1432 /* send any remaining packets up */
1433 if (bus->glom.qlen)
1434 brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
1435
1436 bus->sdcnt.rxglomframes++;
1437 bus->sdcnt.rxglompkts += bus->glom.qlen;
1438 }
1439 return num;
1440 }
1441
1442 static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1443 bool *pending)
1444 {
1445 DECLARE_WAITQUEUE(wait, current);
1446 int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1447
1448 /* Wait until control frame is available */
1449 add_wait_queue(&bus->dcmd_resp_wait, &wait);
1450 set_current_state(TASK_INTERRUPTIBLE);
1451
1452 while (!(*condition) && (!signal_pending(current) && timeout))
1453 timeout = schedule_timeout(timeout);
1454
1455 if (signal_pending(current))
1456 *pending = true;
1457
1458 set_current_state(TASK_RUNNING);
1459 remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1460
1461 return timeout;
1462 }
1463
1464 static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
1465 {
1466 if (waitqueue_active(&bus->dcmd_resp_wait))
1467 wake_up_interruptible(&bus->dcmd_resp_wait);
1468
1469 return 0;
1470 }
1471 static void
1472 brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1473 {
1474 uint rdlen, pad;
1475 u8 *buf = NULL, *rbuf;
1476 int sdret;
1477
1478 brcmf_dbg(TRACE, "Enter\n");
1479
1480 if (bus->rxblen)
1481 buf = vzalloc(bus->rxblen);
1482 if (!buf)
1483 goto done;
1484
1485 rbuf = bus->rxbuf;
1486 pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
1487 if (pad)
1488 rbuf += (BRCMF_SDALIGN - pad);
1489
1490 /* Copy the already-read portion over */
1491 memcpy(buf, hdr, BRCMF_FIRSTREAD);
1492 if (len <= BRCMF_FIRSTREAD)
1493 goto gotpkt;
1494
1495 /* Raise rdlen to next SDIO block to avoid tail command */
1496 rdlen = len - BRCMF_FIRSTREAD;
1497 if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1498 pad = bus->blocksize - (rdlen % bus->blocksize);
1499 if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1500 ((len + pad) < bus->sdiodev->bus_if->maxctl))
1501 rdlen += pad;
1502 } else if (rdlen % BRCMF_SDALIGN) {
1503 rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
1504 }
1505
1506 /* Satisfy length-alignment requirements */
1507 if (rdlen & (ALIGNMENT - 1))
1508 rdlen = roundup(rdlen, ALIGNMENT);
1509
1510 /* Drop if the read is too big or it exceeds our maximum */
1511 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1512 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1513 rdlen, bus->sdiodev->bus_if->maxctl);
1514 brcmf_sdbrcm_rxfail(bus, false, false);
1515 goto done;
1516 }
1517
1518 if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1519 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1520 len, len - doff, bus->sdiodev->bus_if->maxctl);
1521 bus->sdcnt.rx_toolong++;
1522 brcmf_sdbrcm_rxfail(bus, false, false);
1523 goto done;
1524 }
1525
1526 /* Read remainder of frame body */
1527 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1528 bus->sdiodev->sbwad,
1529 SDIO_FUNC_2,
1530 F2SYNC, rbuf, rdlen);
1531 bus->sdcnt.f2rxdata++;
1532
1533 /* Control frame failures need retransmission */
1534 if (sdret < 0) {
1535 brcmf_err("read %d control bytes failed: %d\n",
1536 rdlen, sdret);
1537 bus->sdcnt.rxc_errors++;
1538 brcmf_sdbrcm_rxfail(bus, true, true);
1539 goto done;
1540 } else
1541 memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1542
1543 gotpkt:
1544
1545 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1546 buf, len, "RxCtrl:\n");
1547
1548 /* Point to valid data and indicate its length */
1549 spin_lock_bh(&bus->rxctl_lock);
1550 if (bus->rxctl) {
1551 brcmf_err("last control frame is being processed.\n");
1552 spin_unlock_bh(&bus->rxctl_lock);
1553 vfree(buf);
1554 goto done;
1555 }
1556 bus->rxctl = buf + doff;
1557 bus->rxctl_orig = buf;
1558 bus->rxlen = len - doff;
1559 spin_unlock_bh(&bus->rxctl_lock);
1560
1561 done:
1562 /* Awake any waiters */
1563 brcmf_sdbrcm_dcmd_resp_wake(bus);
1564 }
1565
1566 /* Pad read to blocksize for efficiency */
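/*
 * Example with illustrative numbers: with blocksize = 512, roundup = 512 and
 * *rdlen = 1500, *pad becomes 36 and the read is rounded up to 1536 bytes,
 * since 1536 + BRCMF_FIRSTREAD still fits below MAX_RX_DATASZ (2048).
 */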
1567 static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1568 {
1569 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1570 *pad = bus->blocksize - (*rdlen % bus->blocksize);
1571 if (*pad <= bus->roundup && *pad < bus->blocksize &&
1572 *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1573 *rdlen += *pad;
1574 } else if (*rdlen % BRCMF_SDALIGN) {
1575 *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
1576 }
1577 }
1578
1579 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1580 {
1581 struct sk_buff *pkt; /* Packet for event or data frames */
1582 struct sk_buff_head pktlist; /* needed for bus interface */
1583 u16 pad; /* Number of pad bytes to read */
1584 uint rxleft = 0; /* Remaining number of frames allowed */
1585 int ret; /* Return code from calls */
1586 uint rxcount = 0; /* Total frames read */
1587 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
1588 u8 head_read = 0;
1589
1590 brcmf_dbg(TRACE, "Enter\n");
1591
1592 /* Not finished unless we encounter no more frames indication */
1593 bus->rxpending = true;
1594
1595 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1596 !bus->rxskip && rxleft &&
1597 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
1598 rd->seq_num++, rxleft--) {
1599
1600 /* Handle glomming separately */
1601 if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1602 u8 cnt;
1603 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1604 bus->glomd, skb_peek(&bus->glom));
1605 cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
1606 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1607 rd->seq_num += cnt - 1;
1608 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1609 continue;
1610 }
1611
1612 rd->len_left = rd->len;
1613 /* read header first for unknown frame length */
1614 sdio_claim_host(bus->sdiodev->func[1]);
1615 if (!rd->len) {
1616 ret = brcmf_sdcard_recv_buf(bus->sdiodev,
1617 bus->sdiodev->sbwad,
1618 SDIO_FUNC_2, F2SYNC,
1619 bus->rxhdr,
1620 BRCMF_FIRSTREAD);
1621 bus->sdcnt.f2rxhdrs++;
1622 if (ret < 0) {
1623 brcmf_err("RXHEADER FAILED: %d\n",
1624 ret);
1625 bus->sdcnt.rx_hdrfail++;
1626 brcmf_sdbrcm_rxfail(bus, true, true);
1627 sdio_release_host(bus->sdiodev->func[1]);
1628 continue;
1629 }
1630
1631 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1632 bus->rxhdr, SDPCM_HDRLEN,
1633 "RxHdr:\n");
1634
1635 if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
1636 BRCMF_SDIO_FT_NORMAL)) {
1637 sdio_release_host(bus->sdiodev->func[1]);
1638 if (!bus->rxpending)
1639 break;
1640 else
1641 continue;
1642 }
1643
1644 if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1645 brcmf_sdbrcm_read_control(bus, bus->rxhdr,
1646 rd->len,
1647 rd->dat_offset);
1648 /* prepare the descriptor for the next read */
1649 rd->len = rd->len_nxtfrm << 4;
1650 rd->len_nxtfrm = 0;
1651 /* treat all packets as events if we don't know */
1652 rd->channel = SDPCM_EVENT_CHANNEL;
1653 sdio_release_host(bus->sdiodev->func[1]);
1654 continue;
1655 }
1656 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1657 rd->len - BRCMF_FIRSTREAD : 0;
1658 head_read = BRCMF_FIRSTREAD;
1659 }
1660
1661 brcmf_pad(bus, &pad, &rd->len_left);
1662
1663 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1664 BRCMF_SDALIGN);
1665 if (!pkt) {
1666 /* Give up on data, request rtx of events */
1667 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1668 brcmf_sdbrcm_rxfail(bus, false,
1669 RETRYCHAN(rd->channel));
1670 sdio_release_host(bus->sdiodev->func[1]);
1671 continue;
1672 }
1673 skb_pull(pkt, head_read);
1674 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1675
1676 ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1677 SDIO_FUNC_2, F2SYNC, pkt);
1678 bus->sdcnt.f2rxdata++;
1679 sdio_release_host(bus->sdiodev->func[1]);
1680
1681 if (ret < 0) {
1682 brcmf_err("read %d bytes from channel %d failed: %d\n",
1683 rd->len, rd->channel, ret);
1684 brcmu_pkt_buf_free_skb(pkt);
1685 sdio_claim_host(bus->sdiodev->func[1]);
1686 brcmf_sdbrcm_rxfail(bus, true,
1687 RETRYCHAN(rd->channel));
1688 sdio_release_host(bus->sdiodev->func[1]);
1689 continue;
1690 }
1691
1692 if (head_read) {
1693 skb_push(pkt, head_read);
1694 memcpy(pkt->data, bus->rxhdr, head_read);
1695 head_read = 0;
1696 } else {
1697 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1698 rd_new.seq_num = rd->seq_num;
1699 sdio_claim_host(bus->sdiodev->func[1]);
1700 if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
1701 BRCMF_SDIO_FT_NORMAL)) {
1702 rd->len = 0;
1703 brcmu_pkt_buf_free_skb(pkt);
1704 }
1705 bus->sdcnt.rx_readahead_cnt++;
1706 if (rd->len != roundup(rd_new.len, 16)) {
1707 brcmf_err("frame length mismatch:read %d, should be %d\n",
1708 rd->len,
1709 roundup(rd_new.len, 16) >> 4);
1710 rd->len = 0;
1711 brcmf_sdbrcm_rxfail(bus, true, true);
1712 sdio_release_host(bus->sdiodev->func[1]);
1713 brcmu_pkt_buf_free_skb(pkt);
1714 continue;
1715 }
1716 sdio_release_host(bus->sdiodev->func[1]);
1717 rd->len_nxtfrm = rd_new.len_nxtfrm;
1718 rd->channel = rd_new.channel;
1719 rd->dat_offset = rd_new.dat_offset;
1720
1721 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1722 BRCMF_DATA_ON()) &&
1723 BRCMF_HDRS_ON(),
1724 bus->rxhdr, SDPCM_HDRLEN,
1725 "RxHdr:\n");
1726
1727 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1728 brcmf_err("readahead on control packet %d?\n",
1729 rd_new.seq_num);
1730 /* Force retry w/normal header read */
1731 rd->len = 0;
1732 sdio_claim_host(bus->sdiodev->func[1]);
1733 brcmf_sdbrcm_rxfail(bus, false, true);
1734 sdio_release_host(bus->sdiodev->func[1]);
1735 brcmu_pkt_buf_free_skb(pkt);
1736 continue;
1737 }
1738 }
1739
1740 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1741 pkt->data, rd->len, "Rx Data:\n");
1742
1743 /* Save superframe descriptor and allocate packet frame */
1744 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1745 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
1746 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1747 rd->len);
1748 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1749 pkt->data, rd->len,
1750 "Glom Data:\n");
1751 __skb_trim(pkt, rd->len);
1752 skb_pull(pkt, SDPCM_HDRLEN);
1753 bus->glomd = pkt;
1754 } else {
1755 brcmf_err("%s: glom superframe w/o "
1756 "descriptor!\n", __func__);
1757 sdio_claim_host(bus->sdiodev->func[1]);
1758 brcmf_sdbrcm_rxfail(bus, false, false);
1759 sdio_release_host(bus->sdiodev->func[1]);
1760 }
1761 /* prepare the descriptor for the next read */
1762 rd->len = rd->len_nxtfrm << 4;
1763 rd->len_nxtfrm = 0;
1764 /* treat all packets as events if we don't know */
1765 rd->channel = SDPCM_EVENT_CHANNEL;
1766 continue;
1767 }
1768
1769 /* Fill in packet len and prio, deliver upward */
1770 __skb_trim(pkt, rd->len);
1771 skb_pull(pkt, rd->dat_offset);
1772
1773 /* prepare the descriptor for the next read */
1774 rd->len = rd->len_nxtfrm << 4;
1775 rd->len_nxtfrm = 0;
1776 /* treat all packets as events if we don't know */
1777 rd->channel = SDPCM_EVENT_CHANNEL;
1778
1779 if (pkt->len == 0) {
1780 brcmu_pkt_buf_free_skb(pkt);
1781 continue;
1782 }
1783
1784 skb_queue_head_init(&pktlist);
1785 skb_queue_tail(&pktlist, pkt);
1786 brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
1787 }
1788
1789 rxcount = maxframes - rxleft;
1790 /* Message if we hit the limit */
1791 if (!rxleft)
1792 brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
1793 else
1794 brcmf_dbg(DATA, "processed %d frames\n", rxcount);
1795 /* Back off rxseq if awaiting rtx, update rx_seq */
1796 if (bus->rxskip)
1797 rd->seq_num--;
1798 bus->rx_seq = rd->seq_num;
1799
1800 return rxcount;
1801 }
1802
1803 static void
1804 brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1805 {
1806 if (waitqueue_active(&bus->ctrl_wait))
1807 wake_up_interruptible(&bus->ctrl_wait);
1808 return;
1809 }
1810
1811 /* Writes a HW/SW header into the packet and sends it. */
1812 /* Assumes: (a) header space already there, (b) caller holds lock */
1813 static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1814 uint chan)
1815 {
1816 int ret;
1817 u8 *frame;
1818 u16 len, pad = 0;
1819 u32 swheader;
1820 int i;
1821
1822 brcmf_dbg(TRACE, "Enter\n");
1823
1824 frame = (u8 *) (pkt->data);
1825
1826 /* Add alignment padding, allocate new packet if needed */
1827 pad = ((unsigned long)frame % BRCMF_SDALIGN);
1828 if (pad) {
1829 if (skb_headroom(pkt) < pad) {
1830 brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
1831 skb_headroom(pkt), pad);
1832 bus->sdiodev->bus_if->tx_realloc++;
1833 ret = skb_cow(pkt, BRCMF_SDALIGN);
1834 if (ret)
1835 goto done;
1836 pad = ((unsigned long)frame % BRCMF_SDALIGN);
1837 }
1838 skb_push(pkt, pad);
1839 frame = (u8 *) (pkt->data);
1840 memset(frame, 0, pad + SDPCM_HDRLEN);
1841 }
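/*
 * Worked example of the alignment fix-up above, assuming BRCMF_SDALIGN is
 * 32 (its usual value in this driver; treat the exact value as an
 * assumption): if pkt->data sits at an address ending in 0x2c, then
 * pad = 0x2c % 32 = 12 and skb_push(pkt, 12) moves the data pointer back
 * to ...0x20, a 32-byte boundary. The memset then zeroes the header area
 * and the pad bytes that follow it before the header fields are filled in
 * below.
 */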
1842 /* precondition: pad < BRCMF_SDALIGN */
1843
1844 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
1845 len = (u16) (pkt->len);
1846 *(__le16 *) frame = cpu_to_le16(len);
1847 *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
1848
1849 /* Software tag: channel, sequence number, data offset */
1850 swheader =
1851 ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
1852 (((pad +
1853 SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
1854
1855 *(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
1856 *(((__le32 *) frame) + 2) = 0;
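/*
 * For reference, the SDPCM header assembled by the stores above (assuming
 * SDPCM_FRAMETAG_LEN == 4 and SDPCM_HDRLEN == 12, as used elsewhere in
 * this file) is laid out roughly as:
 *
 *   frame[0..1]  le16 frame length (len)
 *   frame[2..3]  le16 complement check (~len)
 *   frame[4..7]  le32 software header: channel, tx sequence number and
 *                data offset (pad + SDPCM_HDRLEN), packed via the
 *                SDPCM_*_MASK/SHIFT definitions
 *   frame[8..11] le32 second software word, written as zero here
 *
 * The exact bit positions come from the SDPCM_* macros; this sketch only
 * illustrates the byte layout.
 */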
1857
1858 #ifdef DEBUG
1859 tx_packets[pkt->priority]++;
1860 #endif
1861
1862 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
1863 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
1864 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
1865 frame, len, "Tx Frame:\n");
1866 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1867 ((BRCMF_CTL_ON() &&
1868 chan == SDPCM_CONTROL_CHANNEL) ||
1869 (BRCMF_DATA_ON() &&
1870 chan != SDPCM_CONTROL_CHANNEL))) &&
1871 BRCMF_HDRS_ON(),
1872 frame, min_t(u16, len, 16), "TxHdr:\n");
1873
1874 /* Raise len to next SDIO block to eliminate tail command */
1875 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
1876 u16 pad = bus->blocksize - (len % bus->blocksize);
1877 if ((pad <= bus->roundup) && (pad < bus->blocksize))
1878 len += pad;
1879 } else if (len % BRCMF_SDALIGN) {
1880 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
1881 }
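/*
 * Example of the rounding above, assuming a 512-byte F2 block size and
 * bus->roundup == 512 (both are configuration-dependent): a frame of
 * len 700 gets pad = 512 - (700 % 512) = 324, so len becomes 1024, a
 * whole number of blocks, and the transfer can complete in block mode
 * without a trailing byte-mode command. Frames no larger than one block
 * are only padded up to the BRCMF_SDALIGN boundary.
 */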
1882
1883 /* Some controllers have trouble with odd bytes -- round to even */
1884 if (len & (ALIGNMENT - 1))
1885 len = roundup(len, ALIGNMENT);
1886
1887 sdio_claim_host(bus->sdiodev->func[1]);
1888 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1889 SDIO_FUNC_2, F2SYNC, pkt);
1890 bus->sdcnt.f2txdata++;
1891
1892 if (ret < 0) {
1893 /* On failure, abort the command and terminate the frame */
1894 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
1895 ret);
1896 bus->sdcnt.tx_sderrs++;
1897
1898 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
1899 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1900 SFC_WF_TERM, NULL);
1901 bus->sdcnt.f1regdata++;
1902
1903 for (i = 0; i < 3; i++) {
1904 u8 hi, lo;
1905 hi = brcmf_sdio_regrb(bus->sdiodev,
1906 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
1907 lo = brcmf_sdio_regrb(bus->sdiodev,
1908 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
1909 bus->sdcnt.f1regdata += 2;
1910 if ((hi == 0) && (lo == 0))
1911 break;
1912 }
1913
1914 }
1915 sdio_release_host(bus->sdiodev->func[1]);
1916 if (ret == 0)
1917 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
1918
1919 done:
1920 /* restore pkt buffer pointer before calling tx complete routine */
1921 skb_pull(pkt, SDPCM_HDRLEN + pad);
1922 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
1923 return ret;
1924 }
1925
1926 static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1927 {
1928 struct sk_buff *pkt;
1929 u32 intstatus = 0;
1930 int ret = 0, prec_out;
1931 uint cnt = 0;
1932 uint datalen;
1933 u8 tx_prec_map;
1934
1935 brcmf_dbg(TRACE, "Enter\n");
1936
1937 tx_prec_map = ~bus->flowcontrol;
1938
1939 /* Send frames until the limit or some other event */
1940 for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
1941 spin_lock_bh(&bus->txqlock);
1942 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
1943 if (pkt == NULL) {
1944 spin_unlock_bh(&bus->txqlock);
1945 break;
1946 }
1947 spin_unlock_bh(&bus->txqlock);
1948 datalen = pkt->len - SDPCM_HDRLEN;
1949
1950 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
1951
1952 /* In poll mode, need to check for other events */
1953 if (!bus->intr && cnt) {
1954 /* Check device status, signal pending interrupt */
1955 sdio_claim_host(bus->sdiodev->func[1]);
1956 ret = r_sdreg32(bus, &intstatus,
1957 offsetof(struct sdpcmd_regs,
1958 intstatus));
1959 sdio_release_host(bus->sdiodev->func[1]);
1960 bus->sdcnt.f2txdata++;
1961 if (ret != 0)
1962 break;
1963 if (intstatus & bus->hostintmask)
1964 atomic_set(&bus->ipend, 1);
1965 }
1966 }
1967
1968 /* Deflow-control stack if needed */
1969 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
1970 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
1971 bus->txoff = false;
1972 brcmf_txflowblock(bus->sdiodev->dev, false);
1973 }
1974
1975 return cnt;
1976 }
1977
1978 static void brcmf_sdbrcm_bus_stop(struct device *dev)
1979 {
1980 u32 local_hostintmask;
1981 u8 saveclk;
1982 int err;
1983 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1984 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
1985 struct brcmf_sdio *bus = sdiodev->bus;
1986
1987 brcmf_dbg(TRACE, "Enter\n");
1988
1989 if (bus->watchdog_tsk) {
1990 send_sig(SIGTERM, bus->watchdog_tsk, 1);
1991 kthread_stop(bus->watchdog_tsk);
1992 bus->watchdog_tsk = NULL;
1993 }
1994
1995 sdio_claim_host(bus->sdiodev->func[1]);
1996
1997 /* Enable clock for device interrupts */
1998 brcmf_sdbrcm_bus_sleep(bus, false, false);
1999
2000 /* Disable and clear interrupts at the chip level also */
2001 w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
2002 local_hostintmask = bus->hostintmask;
2003 bus->hostintmask = 0;
2004
2005 /* Change our idea of bus state */
2006 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2007
2008 /* Force clocks on backplane to be sure F2 interrupt propagates */
2009 saveclk = brcmf_sdio_regrb(bus->sdiodev,
2010 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2011 if (!err) {
2012 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2013 (saveclk | SBSDIO_FORCE_HT), &err);
2014 }
2015 if (err)
2016 brcmf_err("Failed to force clock for F2: err %d\n", err);
2017
2018 /* Turn off the bus (F2), free any pending packets */
2019 brcmf_dbg(INTR, "disable SDIO interrupts\n");
2020 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
2021 NULL);
2022
2023 /* Clear any pending interrupts now that F2 is disabled */
2024 w_sdreg32(bus, local_hostintmask,
2025 offsetof(struct sdpcmd_regs, intstatus));
2026
2027 /* Turn off the backplane clock (only) */
2028 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
2029 sdio_release_host(bus->sdiodev->func[1]);
2030
2031 /* Clear the data packet queues */
2032 brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2033
2034 /* Clear any held glomming stuff */
2035 if (bus->glomd)
2036 brcmu_pkt_buf_free_skb(bus->glomd);
2037 brcmf_sdbrcm_free_glom(bus);
2038
2039 /* Clear rx control and wake any waiters */
2040 spin_lock_bh(&bus->rxctl_lock);
2041 bus->rxlen = 0;
2042 spin_unlock_bh(&bus->rxctl_lock);
2043 brcmf_sdbrcm_dcmd_resp_wake(bus);
2044
2045 /* Reset some F2 state stuff */
2046 bus->rxskip = false;
2047 bus->tx_seq = bus->rx_seq = 0;
2048 }
2049
2050 static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2051 {
2052 unsigned long flags;
2053
2054 if (bus->sdiodev->oob_irq_requested) {
2055 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2056 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2057 enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2058 bus->sdiodev->irq_en = true;
2059 }
2060 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2061 }
2062 }
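/*
 * When an out-of-band interrupt line is used, that line is presumably
 * disabled by the out-of-band IRQ handler elsewhere in this driver before
 * the DPC is scheduled; the code above re-enables it only once no
 * interrupt is pending, under irq_en_lock, so the enable/disable
 * bookkeeping in irq_en stays balanced.
 */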
2063
2064 static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2065 {
2066 struct list_head *new_hd;
2067 unsigned long flags;
2068
2069 if (in_interrupt())
2070 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2071 else
2072 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2073 if (new_hd == NULL)
2074 return;
2075
2076 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2077 list_add_tail(new_hd, &bus->dpc_tsklst);
2078 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2079 }
2080
2081 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2082 {
2083 u8 idx;
2084 u32 addr;
2085 unsigned long val;
2086 int n, ret;
2087
2088 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
2089 addr = bus->ci->c_inf[idx].base +
2090 offsetof(struct sdpcmd_regs, intstatus);
2091
2092 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
2093 bus->sdcnt.f1regdata++;
2094 if (ret != 0)
2095 val = 0;
2096
2097 val &= bus->hostintmask;
2098 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2099
2100 /* Clear interrupts */
2101 if (val) {
2102 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
2103 bus->sdcnt.f1regdata++;
2104 }
2105
2106 if (ret) {
2107 atomic_set(&bus->intstatus, 0);
2108 } else if (val) {
2109 for_each_set_bit(n, &val, 32)
2110 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2111 }
2112
2113 return ret;
2114 }
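/*
 * Summary of the handshake above: the SDIO core's intstatus register is
 * read over function 1, masked with the bits the host cares about
 * (hostintmask), and the same value is written back to clear those bits
 * on the device. The surviving bits are then folded into the atomic
 * bus->intstatus one bit at a time with set_bit() so a concurrent update
 * from the ISR is not lost by a plain read-modify-write.
 */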
2115
2116 static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2117 {
2118 u32 newstatus = 0;
2119 unsigned long intstatus;
2120 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2121 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2122 uint framecnt = 0; /* Temporary counter of tx/rx frames */
2123 int err = 0, n;
2124
2125 brcmf_dbg(TRACE, "Enter\n");
2126
2127 sdio_claim_host(bus->sdiodev->func[1]);
2128
2129 /* If waiting for HTAVAIL, check status */
2130 if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2131 u8 clkctl, devctl = 0;
2132
2133 #ifdef DEBUG
2134 /* Check for inconsistent device control */
2135 devctl = brcmf_sdio_regrb(bus->sdiodev,
2136 SBSDIO_DEVICE_CTL, &err);
2137 if (err) {
2138 brcmf_err("error reading DEVCTL: %d\n", err);
2139 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2140 }
2141 #endif /* DEBUG */
2142
2143 /* Read CSR, if clock on switch to AVAIL, else ignore */
2144 clkctl = brcmf_sdio_regrb(bus->sdiodev,
2145 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2146 if (err) {
2147 brcmf_err("error reading CSR: %d\n",
2148 err);
2149 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2150 }
2151
2152 brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2153 devctl, clkctl);
2154
2155 if (SBSDIO_HTAV(clkctl)) {
2156 devctl = brcmf_sdio_regrb(bus->sdiodev,
2157 SBSDIO_DEVICE_CTL, &err);
2158 if (err) {
2159 brcmf_err("error reading DEVCTL: %d\n",
2160 err);
2161 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2162 }
2163 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2164 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2165 devctl, &err);
2166 if (err) {
2167 brcmf_err("error writing DEVCTL: %d\n",
2168 err);
2169 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2170 }
2171 bus->clkstate = CLK_AVAIL;
2172 }
2173 }
2174
2175 /* Make sure backplane clock is on */
2176 brcmf_sdbrcm_bus_sleep(bus, false, true);
2177
2178 /* Pending interrupt indicates new device status */
2179 if (atomic_read(&bus->ipend) > 0) {
2180 atomic_set(&bus->ipend, 0);
2181 err = brcmf_sdio_intr_rstatus(bus);
2182 }
2183
2184 /* Start with leftover status bits */
2185 intstatus = atomic_xchg(&bus->intstatus, 0);
2186
2187 /* Handle flow-control change: read new state in case our ack
2188 * crossed another change interrupt. If change still set, assume
2189 * FC ON for safety, let next loop through do the debounce.
2190 */
2191 if (intstatus & I_HMB_FC_CHANGE) {
2192 intstatus &= ~I_HMB_FC_CHANGE;
2193 err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2194 offsetof(struct sdpcmd_regs, intstatus));
2195
2196 err = r_sdreg32(bus, &newstatus,
2197 offsetof(struct sdpcmd_regs, intstatus));
2198 bus->sdcnt.f1regdata += 2;
2199 atomic_set(&bus->fcstate,
2200 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2201 intstatus |= (newstatus & bus->hostintmask);
2202 }
2203
2204 /* Handle host mailbox indication */
2205 if (intstatus & I_HMB_HOST_INT) {
2206 intstatus &= ~I_HMB_HOST_INT;
2207 intstatus |= brcmf_sdbrcm_hostmail(bus);
2208 }
2209
2210 sdio_release_host(bus->sdiodev->func[1]);
2211
2212 /* Generally don't ask for these, can get CRC errors... */
2213 if (intstatus & I_WR_OOSYNC) {
2214 brcmf_err("Dongle reports WR_OOSYNC\n");
2215 intstatus &= ~I_WR_OOSYNC;
2216 }
2217
2218 if (intstatus & I_RD_OOSYNC) {
2219 brcmf_err("Dongle reports RD_OOSYNC\n");
2220 intstatus &= ~I_RD_OOSYNC;
2221 }
2222
2223 if (intstatus & I_SBINT) {
2224 brcmf_err("Dongle reports SBINT\n");
2225 intstatus &= ~I_SBINT;
2226 }
2227
2228 /* Would be active due to wake-wlan in gSPI */
2229 if (intstatus & I_CHIPACTIVE) {
2230 brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2231 intstatus &= ~I_CHIPACTIVE;
2232 }
2233
2234 /* Ignore frame indications if rxskip is set */
2235 if (bus->rxskip)
2236 intstatus &= ~I_HMB_FRAME_IND;
2237
2238 /* On frame indication, read available frames */
2239 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2240 framecnt = brcmf_sdio_readframes(bus, rxlimit);
2241 if (!bus->rxpending)
2242 intstatus &= ~I_HMB_FRAME_IND;
2243 rxlimit -= min(framecnt, rxlimit);
2244 }
2245
2246 /* Keep still-pending events for next scheduling */
2247 if (intstatus) {
2248 for_each_set_bit(n, &intstatus, 32)
2249 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2250 }
2251
2252 brcmf_sdbrcm_clrintr(bus);
2253
2254 if (data_ok(bus) && bus->ctrl_frame_stat &&
2255 (bus->clkstate == CLK_AVAIL)) {
2256 int i;
2257
2258 sdio_claim_host(bus->sdiodev->func[1]);
2259 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2260 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2261 (u32) bus->ctrl_frame_len);
2262
2263 if (err < 0) {
2264 /* On failure, abort the command and
2265 terminate the frame */
2266 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2267 err);
2268 bus->sdcnt.tx_sderrs++;
2269
2270 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2271
2272 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2273 SFC_WF_TERM, &err);
2274 bus->sdcnt.f1regdata++;
2275
2276 for (i = 0; i < 3; i++) {
2277 u8 hi, lo;
2278 hi = brcmf_sdio_regrb(bus->sdiodev,
2279 SBSDIO_FUNC1_WFRAMEBCHI,
2280 &err);
2281 lo = brcmf_sdio_regrb(bus->sdiodev,
2282 SBSDIO_FUNC1_WFRAMEBCLO,
2283 &err);
2284 bus->sdcnt.f1regdata += 2;
2285 if ((hi == 0) && (lo == 0))
2286 break;
2287 }
2288
2289 } else {
2290 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2291 }
2292 sdio_release_host(bus->sdiodev->func[1]);
2293 bus->ctrl_frame_stat = false;
2294 brcmf_sdbrcm_wait_event_wakeup(bus);
2295 }
2296 /* Send queued frames (limit 1 if rx may still be pending) */
2297 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2298 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2299 && data_ok(bus)) {
2300 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2301 txlimit;
2302 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
2303 txlimit -= framecnt;
2304 }
2305
2306 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2307 brcmf_err("failed backplane access over SDIO, halting operation\n");
2308 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2309 atomic_set(&bus->intstatus, 0);
2310 } else if (atomic_read(&bus->intstatus) ||
2311 atomic_read(&bus->ipend) > 0 ||
2312 (!atomic_read(&bus->fcstate) &&
2313 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2314 data_ok(bus)) || PKT_AVAILABLE()) {
2315 brcmf_sdbrcm_adddpctsk(bus);
2316 }
2317
2318 /* If we're done for now, turn off clock request. */
2319 if ((bus->clkstate != CLK_PENDING)
2320 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2321 bus->activity = false;
2322 brcmf_dbg(SDIO, "idle state\n");
2323 sdio_claim_host(bus->sdiodev->func[1]);
2324 brcmf_sdbrcm_bus_sleep(bus, true, false);
2325 sdio_release_host(bus->sdiodev->func[1]);
2326 }
2327 }
2328
2329 static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
2330 {
2331 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2332 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2333 struct brcmf_sdio *bus = sdiodev->bus;
2334
2335 return &bus->txq;
2336 }
2337
2338 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2339 {
2340 int ret = -EBADE;
2341 uint datalen, prec;
2342 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2343 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2344 struct brcmf_sdio *bus = sdiodev->bus;
2345 unsigned long flags;
2346
2347 brcmf_dbg(TRACE, "Enter\n");
2348
2349 datalen = pkt->len;
2350
2351 /* Add space for the header */
2352 skb_push(pkt, SDPCM_HDRLEN);
2353 /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2354
2355 prec = prio2prec((pkt->priority & PRIOMASK));
2356
2357 /* Check for existing queue, current flow-control,
2358 pending event, or pending clock */
2359 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2360 bus->sdcnt.fcqueued++;
2361
2362 /* Priority based enq */
2363 spin_lock_bh(&bus->txqlock);
2364 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2365 skb_pull(pkt, SDPCM_HDRLEN);
2366 brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
2367 brcmf_err("out of bus->txq !!!\n");
2368 ret = -ENOSR;
2369 } else {
2370 ret = 0;
2371 }
2372 spin_unlock_bh(&bus->txqlock);
2373
2374 if (pktq_len(&bus->txq) >= TXHI) {
2375 bus->txoff = true;
2376 brcmf_txflowblock(bus->sdiodev->dev, true);
2377 }
2378
2379 #ifdef DEBUG
2380 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2381 qcount[prec] = pktq_plen(&bus->txq, prec);
2382 #endif
2383
2384 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2385 if (list_empty(&bus->dpc_tsklst)) {
2386 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2387
2388 brcmf_sdbrcm_adddpctsk(bus);
2389 queue_work(bus->brcmf_wq, &bus->datawork);
2390 } else {
2391 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2392 }
2393
2394 return ret;
2395 }
2396
2397 #ifdef DEBUG
2398 #define CONSOLE_LINE_MAX 192
2399
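/*
 * brcmf_sdbrcm_readconsole() below polls the firmware's in-memory console:
 * rte_console.log_le holds the device-side address of a ring buffer (buf),
 * its size, and the firmware's write index (idx). The host keeps its own
 * read cursor in bus->console.last, copies the whole ring over SDIO and
 * prints one line per '\n'; a line that is still being written is deferred
 * by backing the cursor up so it is emitted complete on the next poll.
 */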
2400 static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2401 {
2402 struct brcmf_console *c = &bus->console;
2403 u8 line[CONSOLE_LINE_MAX], ch;
2404 u32 n, idx, addr;
2405 int rv;
2406
2407 /* Don't do anything until FWREADY updates console address */
2408 if (bus->console_addr == 0)
2409 return 0;
2410
2411 /* Read console log struct */
2412 addr = bus->console_addr + offsetof(struct rte_console, log_le);
2413 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2414 sizeof(c->log_le));
2415 if (rv < 0)
2416 return rv;
2417
2418 /* Allocate console buffer (one time only) */
2419 if (c->buf == NULL) {
2420 c->bufsize = le32_to_cpu(c->log_le.buf_size);
2421 c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2422 if (c->buf == NULL)
2423 return -ENOMEM;
2424 }
2425
2426 idx = le32_to_cpu(c->log_le.idx);
2427
2428 /* Protect against corrupt value */
2429 if (idx > c->bufsize)
2430 return -EBADE;
2431
2432 /* Skip reading the console buffer if the index pointer
2433 has not moved */
2434 if (idx == c->last)
2435 return 0;
2436
2437 /* Read the console buffer */
2438 addr = le32_to_cpu(c->log_le.buf);
2439 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2440 if (rv < 0)
2441 return rv;
2442
2443 while (c->last != idx) {
2444 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2445 if (c->last == idx) {
2446 /* This would output a partial line.
2447 * Instead, back up
2448 * the buffer pointer and output this
2449 * line next time around.
2450 */
2451 if (c->last >= n)
2452 c->last -= n;
2453 else
2454 c->last = c->bufsize - n;
2455 goto break2;
2456 }
2457 ch = c->buf[c->last];
2458 c->last = (c->last + 1) % c->bufsize;
2459 if (ch == '\n')
2460 break;
2461 line[n] = ch;
2462 }
2463
2464 if (n > 0) {
2465 if (line[n - 1] == '\r')
2466 n--;
2467 line[n] = 0;
2468 pr_debug("CONSOLE: %s\n", line);
2469 }
2470 }
2471 break2:
2472
2473 return 0;
2474 }
2475 #endif /* DEBUG */
2476
2477 static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2478 {
2479 int i;
2480 int ret;
2481
2482 bus->ctrl_frame_stat = false;
2483 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2484 SDIO_FUNC_2, F2SYNC, frame, len);
2485
2486 if (ret < 0) {
2487 /* On failure, abort the command and terminate the frame */
2488 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2489 ret);
2490 bus->sdcnt.tx_sderrs++;
2491
2492 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2493
2494 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2495 SFC_WF_TERM, NULL);
2496 bus->sdcnt.f1regdata++;
2497
2498 for (i = 0; i < 3; i++) {
2499 u8 hi, lo;
2500 hi = brcmf_sdio_regrb(bus->sdiodev,
2501 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2502 lo = brcmf_sdio_regrb(bus->sdiodev,
2503 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2504 bus->sdcnt.f1regdata += 2;
2505 if (hi == 0 && lo == 0)
2506 break;
2507 }
2508 return ret;
2509 }
2510
2511 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2512
2513 return ret;
2514 }
2515
2516 static int
2517 brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2518 {
2519 u8 *frame;
2520 u16 len;
2521 u32 swheader;
2522 uint retries = 0;
2523 u8 doff = 0;
2524 int ret = -1;
2525 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2526 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2527 struct brcmf_sdio *bus = sdiodev->bus;
2528 unsigned long flags;
2529
2530 brcmf_dbg(TRACE, "Enter\n");
2531
2532 /* Back up the pointer to make room for the bus header */
2533 frame = msg - SDPCM_HDRLEN;
2534 len = (msglen += SDPCM_HDRLEN);
2535
2536 /* Add alignment padding (optional for ctl frames) */
2537 doff = ((unsigned long)frame % BRCMF_SDALIGN);
2538 if (doff) {
2539 frame -= doff;
2540 len += doff;
2541 msglen += doff;
2542 memset(frame, 0, doff + SDPCM_HDRLEN);
2543 }
2544 /* precondition: doff < BRCMF_SDALIGN */
2545 doff += SDPCM_HDRLEN;
2546
2547 /* Round send length to next SDIO block */
2548 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2549 u16 pad = bus->blocksize - (len % bus->blocksize);
2550 if ((pad <= bus->roundup) && (pad < bus->blocksize))
2551 len += pad;
2552 } else if (len % BRCMF_SDALIGN) {
2553 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
2554 }
2555
2556 /* Satisfy length-alignment requirements */
2557 if (len & (ALIGNMENT - 1))
2558 len = roundup(len, ALIGNMENT);
2559
2560 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2561
2562 /* Make sure backplane clock is on */
2563 sdio_claim_host(bus->sdiodev->func[1]);
2564 brcmf_sdbrcm_bus_sleep(bus, false, false);
2565 sdio_release_host(bus->sdiodev->func[1]);
2566
2567 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
2568 *(__le16 *) frame = cpu_to_le16((u16) msglen);
2569 *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
2570
2571 /* Software tag: channel, sequence number, data offset */
2572 swheader =
2573 ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
2574 SDPCM_CHANNEL_MASK)
2575 | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
2576 SDPCM_DOFFSET_MASK);
2577 put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
2578 put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
2579
2580 if (!data_ok(bus)) {
2581 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
2582 bus->tx_max, bus->tx_seq);
2583 bus->ctrl_frame_stat = true;
2584 /* Send from dpc */
2585 bus->ctrl_frame_buf = frame;
2586 bus->ctrl_frame_len = len;
2587
2588 wait_event_interruptible_timeout(bus->ctrl_wait,
2589 !bus->ctrl_frame_stat,
2590 msecs_to_jiffies(2000));
2591
2592 if (!bus->ctrl_frame_stat) {
2593 brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
2594 ret = 0;
2595 } else {
2596 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2597 ret = -1;
2598 }
2599 }
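/*
 * At this point the control frame has either been parked for the DPC (no
 * bus credit: ctrl_frame_buf/ctrl_frame_len were handed over and this
 * thread slept on ctrl_wait for up to 2000 ms until the DPC sent it and
 * cleared ctrl_frame_stat), or, if ret is still -1 because credit was
 * available or the wait timed out, it is sent directly below with up to
 * TXRETRIES retries.
 */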
2600
2601 if (ret == -1) {
2602 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2603 frame, len, "Tx Frame:\n");
2604 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2605 BRCMF_HDRS_ON(),
2606 frame, min_t(u16, len, 16), "TxHdr:\n");
2607
2608 do {
2609 sdio_claim_host(bus->sdiodev->func[1]);
2610 ret = brcmf_tx_frame(bus, frame, len);
2611 sdio_release_host(bus->sdiodev->func[1]);
2612 } while (ret < 0 && retries++ < TXRETRIES);
2613 }
2614
2615 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2616 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2617 list_empty(&bus->dpc_tsklst)) {
2618 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2619
2620 bus->activity = false;
2621 sdio_claim_host(bus->sdiodev->func[1]);
2622 brcmf_dbg(INFO, "idle\n");
2623 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2624 sdio_release_host(bus->sdiodev->func[1]);
2625 } else {
2626 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2627 }
2628
2629 if (ret)
2630 bus->sdcnt.tx_ctlerrs++;
2631 else
2632 bus->sdcnt.tx_ctlpkts++;
2633
2634 return ret ? -EIO : 0;
2635 }
2636
2637 #ifdef DEBUG
2638 static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2639 {
2640 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2641 }
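/*
 * Background for the check above: before the firmware is running, the
 * last word of socram appears to hold a token whose upper and lower
 * 16-bit halves are one's complements of each other (the nvram length
 * marker mentioned in the caller); once up, the firmware overwrites it
 * with the address of its sdpcm_shared structure. A value of 0, or one
 * still matching the complement pattern (e.g. 0x0004fffb), is therefore
 * treated as "no valid shared structure".
 */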
2642
2643 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2644 struct sdpcm_shared *sh)
2645 {
2646 u32 addr;
2647 int rv;
2648 u32 shaddr = 0;
2649 struct sdpcm_shared_le sh_le;
2650 __le32 addr_le;
2651
2652 shaddr = bus->ci->rambase + bus->ramsize - 4;
2653
2654 /*
2655 * Read last word in socram to determine
2656 * address of sdpcm_shared structure
2657 */
2658 sdio_claim_host(bus->sdiodev->func[1]);
2659 brcmf_sdbrcm_bus_sleep(bus, false, false);
2660 rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
2661 sdio_release_host(bus->sdiodev->func[1]);
2662 if (rv < 0)
2663 return rv;
2664
2665 addr = le32_to_cpu(addr_le);
2666
2667 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
2668
2669 /*
2670 * Check if addr is valid.
2671 * NVRAM length at the end of memory should have been overwritten.
2672 */
2673 if (!brcmf_sdio_valid_shared_address(addr)) {
2674 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
2675 addr);
2676 return -EINVAL;
2677 }
2678
2679 /* Read hndrte_shared structure */
2680 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
2681 sizeof(struct sdpcm_shared_le));
2682 if (rv < 0)
2683 return rv;
2684
2685 /* Endianness */
2686 sh->flags = le32_to_cpu(sh_le.flags);
2687 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
2688 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
2689 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
2690 sh->assert_line = le32_to_cpu(sh_le.assert_line);
2691 sh->console_addr = le32_to_cpu(sh_le.console_addr);
2692 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2693
2694 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
2695 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
2696 SDPCM_SHARED_VERSION,
2697 sh->flags & SDPCM_SHARED_VERSION_MASK);
2698 return -EPROTO;
2699 }
2700
2701 return 0;
2702 }
2703
2704 static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2705 struct sdpcm_shared *sh, char __user *data,
2706 size_t count)
2707 {
2708 u32 addr, console_ptr, console_size, console_index;
2709 char *conbuf = NULL;
2710 __le32 sh_val;
2711 int rv;
2712 loff_t pos = 0;
2713 int nbytes = 0;
2714
2715 /* obtain console information from device memory */
2716 addr = sh->console_addr + offsetof(struct rte_console, log_le);
2717 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2718 (u8 *)&sh_val, sizeof(u32));
2719 if (rv < 0)
2720 return rv;
2721 console_ptr = le32_to_cpu(sh_val);
2722
2723 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2724 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2725 (u8 *)&sh_val, sizeof(u32));
2726 if (rv < 0)
2727 return rv;
2728 console_size = le32_to_cpu(sh_val);
2729
2730 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
2731 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2732 (u8 *)&sh_val, sizeof(u32));
2733 if (rv < 0)
2734 return rv;
2735 console_index = le32_to_cpu(sh_val);
2736
2737 /* allocate buffer for console data */
2738 if (console_size <= CONSOLE_BUFFER_MAX)
2739 conbuf = vzalloc(console_size+1);
2740
2741 if (!conbuf)
2742 return -ENOMEM;
2743
2744 /* obtain the console data from device */
2745 conbuf[console_size] = '\0';
2746 rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
2747 console_size);
2748 if (rv < 0)
2749 goto done;
2750
2751 rv = simple_read_from_buffer(data, count, &pos,
2752 conbuf + console_index,
2753 console_size - console_index);
2754 if (rv < 0)
2755 goto done;
2756
2757 nbytes = rv;
2758 if (console_index > 0) {
2759 pos = 0;
2760 rv = simple_read_from_buffer(data+nbytes, count, &pos,
2761 conbuf, console_index - 1);
2762 if (rv < 0)
2763 goto done;
2764 rv += nbytes;
2765 }
2766 done:
2767 vfree(conbuf);
2768 return rv;
2769 }
2770
2771 static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2772 char __user *data, size_t count)
2773 {
2774 int error, res;
2775 char buf[350];
2776 struct brcmf_trap_info tr;
2777 loff_t pos = 0;
2778
2779 if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
2780 brcmf_dbg(INFO, "no trap in firmware\n");
2781 return 0;
2782 }
2783
2784 error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
2785 sizeof(struct brcmf_trap_info));
2786 if (error < 0)
2787 return error;
2788
2789 res = scnprintf(buf, sizeof(buf),
2790 "dongle trap info: type 0x%x @ epc 0x%08x\n"
2791 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
2792 " lr 0x%08x pc 0x%08x offset 0x%x\n"
2793 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
2794 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
2795 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
2796 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
2797 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
2798 le32_to_cpu(tr.pc), sh->trap_addr,
2799 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
2800 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
2801 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
2802 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
2803
2804 return simple_read_from_buffer(data, count, &pos, buf, res);
2805 }
2806
2807 static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
2808 struct sdpcm_shared *sh, char __user *data,
2809 size_t count)
2810 {
2811 int error = 0;
2812 char buf[200];
2813 char file[80] = "?";
2814 char expr[80] = "<???>";
2815 int res;
2816 loff_t pos = 0;
2817
2818 if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
2819 brcmf_dbg(INFO, "firmware not built with -assert\n");
2820 return 0;
2821 } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
2822 brcmf_dbg(INFO, "no assert in dongle\n");
2823 return 0;
2824 }
2825
2826 sdio_claim_host(bus->sdiodev->func[1]);
2827 if (sh->assert_file_addr != 0) {
2828 error = brcmf_sdio_ramrw(bus->sdiodev, false,
2829 sh->assert_file_addr, (u8 *)file, 80);
2830 if (error < 0) {
2831 sdio_release_host(bus->sdiodev->func[1]);
return error;
}
2832 }
2833 if (sh->assert_exp_addr != 0) {
2834 error = brcmf_sdio_ramrw(bus->sdiodev, false,
2835 sh->assert_exp_addr, (u8 *)expr, 80);
2836 if (error < 0) {
2837 sdio_release_host(bus->sdiodev->func[1]);
return error;
}
2838 }
2839 sdio_release_host(bus->sdiodev->func[1]);
2840
2841 res = scnprintf(buf, sizeof(buf),
2842 "dongle assert: %s:%d: assert(%s)\n",
2843 file, sh->assert_line, expr);
2844 return simple_read_from_buffer(data, count, &pos, buf, res);
2845 }
2846
2847 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2848 {
2849 int error;
2850 struct sdpcm_shared sh;
2851
2852 error = brcmf_sdio_readshared(bus, &sh);
2853
2854 if (error < 0)
2855 return error;
2856
2857 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
2858 brcmf_dbg(INFO, "firmware not built with -assert\n");
2859 else if (sh.flags & SDPCM_SHARED_ASSERT)
2860 brcmf_err("assertion in dongle\n");
2861
2862 if (sh.flags & SDPCM_SHARED_TRAP)
2863 brcmf_err("firmware trap in dongle\n");
2864
2865 return 0;
2866 }
2867
2868 static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
2869 size_t count, loff_t *ppos)
2870 {
2871 int error = 0;
2872 struct sdpcm_shared sh;
2873 int nbytes = 0;
2874 loff_t pos = *ppos;
2875
2876 if (pos != 0)
2877 return 0;
2878
2879 error = brcmf_sdio_readshared(bus, &sh);
2880 if (error < 0)
2881 goto done;
2882
2883 error = brcmf_sdio_assert_info(bus, &sh, data, count);
2884 if (error < 0)
2885 goto done;
2886 nbytes = error;
2887
2888 error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
2889 if (error < 0)
2890 goto done;
2891 nbytes += error;
2892
2893 error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
2894 if (error < 0)
2895 goto done;
2896 nbytes += error;
2897
2898 error = nbytes;
2899 *ppos += nbytes;
2900 done:
2901 return error;
2902 }
2903
2904 static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
2905 size_t count, loff_t *ppos)
2906 {
2907 struct brcmf_sdio *bus = f->private_data;
2908 int res;
2909
2910 res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
2911 if (res > 0)
2912 *ppos += res;
2913 return (ssize_t)res;
2914 }
2915
2916 static const struct file_operations brcmf_sdio_forensic_ops = {
2917 .owner = THIS_MODULE,
2918 .open = simple_open,
2919 .read = brcmf_sdio_forensic_read
2920 };
2921
2922 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2923 {
2924 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
2925 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
2926
2927 if (IS_ERR_OR_NULL(dentry))
2928 return;
2929
2930 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
2931 &brcmf_sdio_forensic_ops);
2932 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
2933 }
2934 #else
2935 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2936 {
2937 return 0;
2938 }
2939
2940 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2941 {
2942 }
2943 #endif /* DEBUG */
2944
2945 static int
2946 brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
2947 {
2948 int timeleft;
2949 uint rxlen = 0;
2950 bool pending;
2951 u8 *buf;
2952 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2953 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2954 struct brcmf_sdio *bus = sdiodev->bus;
2955
2956 brcmf_dbg(TRACE, "Enter\n");
2957
2958 /* Wait until control frame is available */
2959 timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
2960
2961 spin_lock_bh(&bus->rxctl_lock);
2962 rxlen = bus->rxlen;
2963 memcpy(msg, bus->rxctl, min(msglen, rxlen));
2964 bus->rxctl = NULL;
2965 buf = bus->rxctl_orig;
2966 bus->rxctl_orig = NULL;
2967 bus->rxlen = 0;
2968 spin_unlock_bh(&bus->rxctl_lock);
2969 vfree(buf);
2970
2971 if (rxlen) {
2972 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
2973 rxlen, msglen);
2974 } else if (timeleft == 0) {
2975 brcmf_err("resumed on timeout\n");
2976 brcmf_sdbrcm_checkdied(bus);
2977 } else if (pending) {
2978 brcmf_dbg(CTL, "cancelled\n");
2979 return -ERESTARTSYS;
2980 } else {
2981 brcmf_dbg(CTL, "resumed for unknown reason?\n");
2982 brcmf_sdbrcm_checkdied(bus);
2983 }
2984
2985 if (rxlen)
2986 bus->sdcnt.rx_ctlpkts++;
2987 else
2988 bus->sdcnt.rx_ctlerrs++;
2989
2990 return rxlen ? (int)rxlen : -ETIMEDOUT;
2991 }
2992
2993 static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
2994 {
2995 struct chip_info *ci = bus->ci;
2996
2997 /* To enter download state, disable ARM and reset SOCRAM.
2998 * To exit download state, simply reset ARM (default is RAM boot).
2999 */
3000 if (enter) {
3001 bus->alp_only = true;
3002
3003 brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
3004 } else {
3005 if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
3006 bus->varsz))
3007 return false;
3008
3009 /* Allow HT Clock now that the ARM is running. */
3010 bus->alp_only = false;
3011
3012 bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
3013 }
3014
3015 return true;
3016 }
3017
3018 static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
3019 {
3020 if (bus->firmware->size < bus->fw_ptr + len)
3021 len = bus->firmware->size - bus->fw_ptr;
3022
3023 memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
3024 bus->fw_ptr += len;
3025 return len;
3026 }
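/*
 * brcmf_sdbrcm_get_image() simply streams the next chunk of the requested
 * firmware blob; the download loop below writes it to device RAM in
 * MEMBLOCK-sized pieces starting at ci->rambase and, when the chip has an
 * ARM CR4 core, keeps a copy of the first bytes in rst_vec, apparently
 * for use when the ARM is later taken out of reset.
 */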
3027
3028 static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3029 {
3030 int offset;
3031 uint len;
3032 u8 *memblock = NULL, *memptr;
3033 int ret;
3034 u8 idx;
3035
3036 brcmf_dbg(INFO, "Enter\n");
3037
3038 ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
3039 &bus->sdiodev->func[2]->dev);
3040 if (ret) {
3041 brcmf_err("Fail to request firmware %d\n", ret);
3042 return ret;
3043 }
3044 bus->fw_ptr = 0;
3045
3046 memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
3047 if (memblock == NULL) {
3048 ret = -ENOMEM;
3049 goto err;
3050 }
3051 if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
3052 memptr += (BRCMF_SDALIGN -
3053 ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
3054
3055 offset = bus->ci->rambase;
3056
3057 /* Download image */
3058 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
3059 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
3060 if (BRCMF_MAX_CORENUM != idx)
3061 memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
3062 while (len) {
3063 ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
3064 if (ret) {
3065 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3066 ret, MEMBLOCK, offset);
3067 goto err;
3068 }
3069
3070 offset += MEMBLOCK;
3071 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
3072 }
3073
3074 err:
3075 kfree(memblock);
3076
3077 release_firmware(bus->firmware);
3078 bus->fw_ptr = 0;
3079
3080 return ret;
3081 }
3082
3083 /*
3084 * brcmf_process_nvram_vars: takes a buffer of "<var>=<value>\n" lines read
3085 * from the nvram file, ending in a NUL.
3086 * Removes carriage returns, empty lines, comment lines, and converts
3087 * newlines to NULs.
3088 * Shortens buffer as needed and pads with NULs. End of buffer is marked
3089 * by two NULs.
3090 */
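/*
 * For illustration (the variable names here are made up), an input of:
 *
 *   # sample comment\n
 *   boardtype=0x062b\r\n
 *   \n
 *   macaddr=00:90:4c:c5:12:38\n
 *
 * is reduced to "boardtype=0x062b\0macaddr=00:90:4c:c5:12:38\0\0": comments,
 * blank lines and carriage returns are dropped, each remaining line is
 * NUL-terminated, and the result is NUL-padded and rounded up to a
 * multiple of 4 bytes before being downloaded to the device.
 */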
3091
3092 static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
3093 {
3094 char *varbuf;
3095 char *dp;
3096 bool findNewline;
3097 int column;
3098 int ret = 0;
3099 uint buf_len, n, len;
3100
3101 len = bus->firmware->size;
3102 varbuf = vmalloc(len);
3103 if (!varbuf)
3104 return -ENOMEM;
3105
3106 memcpy(varbuf, bus->firmware->data, len);
3107 dp = varbuf;
3108
3109 findNewline = false;
3110 column = 0;
3111
3112 for (n = 0; n < len; n++) {
3113 if (varbuf[n] == 0)
3114 break;
3115 if (varbuf[n] == '\r')
3116 continue;
3117 if (findNewline && varbuf[n] != '\n')
3118 continue;
3119 findNewline = false;
3120 if (varbuf[n] == '#') {
3121 findNewline = true;
3122 continue;
3123 }
3124 if (varbuf[n] == '\n') {
3125 if (column == 0)
3126 continue;
3127 *dp++ = 0;
3128 column = 0;
3129 continue;
3130 }
3131 *dp++ = varbuf[n];
3132 column++;
3133 }
3134 buf_len = dp - varbuf;
3135 while (dp < varbuf + n)
3136 *dp++ = 0;
3137
3138 kfree(bus->vars);
3139 /* roundup needed for download to device */
3140 bus->varsz = roundup(buf_len + 1, 4);
3141 bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
3142 if (bus->vars == NULL) {
3143 bus->varsz = 0;
3144 ret = -ENOMEM;
3145 goto err;
3146 }
3147
3148 /* copy the processed variables and add null termination */
3149 memcpy(bus->vars, varbuf, buf_len);
3150 bus->vars[buf_len] = 0;
3151 err:
3152 vfree(varbuf);
3153 return ret;
3154 }
3155
3156 static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3157 {
3158 int ret;
3159
3160 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
3161 &bus->sdiodev->func[2]->dev);
3162 if (ret) {
3163 brcmf_err("Fail to request nvram %d\n", ret);
3164 return ret;
3165 }
3166
3167 ret = brcmf_process_nvram_vars(bus);
3168
3169 release_firmware(bus->firmware);
3170
3171 return ret;
3172 }
3173
3174 static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3175 {
3176 int bcmerror = -1;
3177
3178 /* Keep arm in reset */
3179 if (!brcmf_sdbrcm_download_state(bus, true)) {
3180 brcmf_err("error placing ARM core in reset\n");
3181 goto err;
3182 }
3183
3184 if (brcmf_sdbrcm_download_code_file(bus)) {
3185 brcmf_err("dongle image file download failed\n");
3186 goto err;
3187 }
3188
3189 if (brcmf_sdbrcm_download_nvram(bus)) {
3190 brcmf_err("dongle nvram file download failed\n");
3191 goto err;
3192 }
3193
3194 /* Take arm out of reset */
3195 if (!brcmf_sdbrcm_download_state(bus, false)) {
3196 brcmf_err("error getting out of ARM core reset\n");
3197 goto err;
3198 }
3199
3200 bcmerror = 0;
3201
3202 err:
3203 return bcmerror;
3204 }
3205
3206 static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
3207 {
3208 u32 addr, reg;
3209
3210 brcmf_dbg(TRACE, "Enter\n");
3211
3212 /* old chips with PMU version less than 17 don't support save restore */
3213 if (bus->ci->pmurev < 17)
3214 return false;
3215
3216 /* read PMU chipcontrol register 3*/
3217 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
3218 brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
3219 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
3220 reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
3221
3222 return (bool)reg;
3223 }
3224
3225 static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
3226 {
3227 int err = 0;
3228 u8 val;
3229
3230 brcmf_dbg(TRACE, "Enter\n");
3231
3232 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3233 &err);
3234 if (err) {
3235 brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3236 return;
3237 }
3238
3239 val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3240 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3241 val, &err);
3242 if (err) {
3243 brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3244 return;
3245 }
3246
3247 /* Add CMD14 Support */
3248 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3249 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3250 SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
3251 &err);
3252 if (err) {
3253 brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3254 return;
3255 }
3256
3257 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3258 SBSDIO_FORCE_HT, &err);
3259 if (err) {
3260 brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3261 return;
3262 }
3263
3264 /* set flag */
3265 bus->sr_enabled = true;
3266 brcmf_dbg(INFO, "SR enabled\n");
3267 }
3268
3269 /* enable KSO bit */
3270 static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
3271 {
3272 u8 val;
3273 int err = 0;
3274
3275 brcmf_dbg(TRACE, "Enter\n");
3276
3277 /* KSO bit added in SDIO core rev 12 */
3278 if (bus->ci->c_inf[1].rev < 12)
3279 return 0;
3280
3281 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3282 &err);
3283 if (err) {
3284 brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3285 return err;
3286 }
3287
3288 if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3289 val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3290 SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3291 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3292 val, &err);
3293 if (err) {
3294 brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3295 return err;
3296 }
3297 }
3298
3299 return 0;
3300 }
3301
3302
3303 static bool
3304 brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3305 {
3306 bool ret;
3307
3308 sdio_claim_host(bus->sdiodev->func[1]);
3309
3310 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3311
3312 ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
3313
3314 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
3315
3316 sdio_release_host(bus->sdiodev->func[1]);
3317
3318 return ret;
3319 }
3320
3321 static int brcmf_sdbrcm_bus_init(struct device *dev)
3322 {
3323 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3324 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3325 struct brcmf_sdio *bus = sdiodev->bus;
3326 unsigned long timeout;
3327 u8 ready, enable;
3328 int err, ret = 0;
3329 u8 saveclk;
3330
3331 brcmf_dbg(TRACE, "Enter\n");
3332
3333 /* try to download image and nvram to the dongle */
3334 if (bus_if->state == BRCMF_BUS_DOWN) {
3335 if (!(brcmf_sdbrcm_download_firmware(bus)))
3336 return -1;
3337 }
3338
3339 if (!bus->sdiodev->bus_if->drvr)
3340 return 0;
3341
3342 /* Start the watchdog timer */
3343 bus->sdcnt.tickcnt = 0;
3344 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3345
3346 sdio_claim_host(bus->sdiodev->func[1]);
3347
3348 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3349 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3350 if (bus->clkstate != CLK_AVAIL)
3351 goto exit;
3352
3353 /* Force clocks on backplane to be sure F2 interrupt propagates */
3354 saveclk = brcmf_sdio_regrb(bus->sdiodev,
3355 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3356 if (!err) {
3357 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3358 (saveclk | SBSDIO_FORCE_HT), &err);
3359 }
3360 if (err) {
3361 brcmf_err("Failed to force clock for F2: err %d\n", err);
3362 goto exit;
3363 }
3364
3365 /* Enable function 2 (frame transfers) */
3366 w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3367 offsetof(struct sdpcmd_regs, tosbmailboxdata));
3368 enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
3369
3370 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3371
3372 timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
3373 ready = 0;
3374 while (enable != ready) {
3375 ready = brcmf_sdio_regrb(bus->sdiodev,
3376 SDIO_CCCR_IORx, NULL);
3377 if (time_after(jiffies, timeout))
3378 break;
3379 else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
3380 /* prevent busy waiting if it takes too long */
3381 msleep_interruptible(20);
3382 }
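/*
 * The loop above gives the firmware up to BRCMF_WAIT_F2RDY ms to report
 * function 2 ready in SDIO_CCCR_IORx after both functions were enabled in
 * SDIO_CCCR_IOEx, sleeping roughly 20 ms between reads for most of the
 * wait so the CPU is not monopolised.
 */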
3383
3384 brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
3385
3386 /* If F2 successfully enabled, set core and enable interrupts */
3387 if (ready == enable) {
3388 /* Set up the interrupt mask and enable interrupts */
3389 bus->hostintmask = HOSTINTMASK;
3390 w_sdreg32(bus, bus->hostintmask,
3391 offsetof(struct sdpcmd_regs, hostintmask));
3392
3393 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
3394 } else {
3395 /* Disable F2 again */
3396 enable = SDIO_FUNC_ENABLE_1;
3397 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3398 ret = -ENODEV;
3399 }
3400
3401 if (brcmf_sdbrcm_sr_capable(bus)) {
3402 brcmf_sdbrcm_sr_init(bus);
3403 } else {
3404 /* Restore previous clock setting */
3405 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3406 saveclk, &err);
3407 }
3408
3409 if (ret == 0) {
3410 ret = brcmf_sdio_intr_register(bus->sdiodev);
3411 if (ret != 0)
3412 brcmf_err("intr register failed:%d\n", ret);
3413 }
3414
3415 /* If we didn't come up, turn off backplane clock */
3416 if (bus_if->state != BRCMF_BUS_DATA)
3417 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3418
3419 exit:
3420 sdio_release_host(bus->sdiodev->func[1]);
3421
3422 return ret;
3423 }
3424
3425 void brcmf_sdbrcm_isr(void *arg)
3426 {
3427 struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
3428
3429 brcmf_dbg(TRACE, "Enter\n");
3430
3431 if (!bus) {
3432 brcmf_err("bus is null pointer, exiting\n");
3433 return;
3434 }
3435
3436 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
3437 brcmf_err("bus is down. we have nothing to do\n");
3438 return;
3439 }
3440 /* Count the interrupt call */
3441 bus->sdcnt.intrcount++;
3442 if (in_interrupt())
3443 atomic_set(&bus->ipend, 1);
3444 else
3445 if (brcmf_sdio_intr_rstatus(bus)) {
3446 brcmf_err("failed backplane access\n");
3447 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3448 }
3449
3450 /* Disable additional interrupts (is this needed now)? */
3451 if (!bus->intr)
3452 brcmf_err("isr w/o interrupt configured!\n");
3453
3454 brcmf_sdbrcm_adddpctsk(bus);
3455 queue_work(bus->brcmf_wq, &bus->datawork);
3456 }
3457
3458 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3459 {
3460 #ifdef DEBUG
3461 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3462 #endif /* DEBUG */
3463 unsigned long flags;
3464
3465 brcmf_dbg(TIMER, "Enter\n");
3466
3467 /* Poll period: check device if appropriate. */
3468 if (!bus->sr_enabled &&
3469 bus->poll && (++bus->polltick >= bus->pollrate)) {
3470 u32 intstatus = 0;
3471
3472 /* Reset poll tick */
3473 bus->polltick = 0;
3474
3475 /* Check device if no interrupts */
3476 if (!bus->intr ||
3477 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3478
3479 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3480 if (list_empty(&bus->dpc_tsklst)) {
3481 u8 devpend;
3482 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3483 flags);
3484 sdio_claim_host(bus->sdiodev->func[1]);
3485 devpend = brcmf_sdio_regrb(bus->sdiodev,
3486 SDIO_CCCR_INTx,
3487 NULL);
3488 sdio_release_host(bus->sdiodev->func[1]);
3489 intstatus =
3490 devpend & (INTR_STATUS_FUNC1 |
3491 INTR_STATUS_FUNC2);
3492 } else {
3493 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3494 flags);
3495 }
3496
3497 /* If there is something, make like the ISR and
3498 schedule the DPC */
3499 if (intstatus) {
3500 bus->sdcnt.pollcnt++;
3501 atomic_set(&bus->ipend, 1);
3502
3503 brcmf_sdbrcm_adddpctsk(bus);
3504 queue_work(bus->brcmf_wq, &bus->datawork);
3505 }
3506 }
3507
3508 /* Update interrupt tracking */
3509 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3510 }
3511 #ifdef DEBUG
3512 /* Poll for console output periodically */
3513 if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
3514 bus->console_interval != 0) {
3515 bus->console.count += BRCMF_WD_POLL_MS;
3516 if (bus->console.count >= bus->console_interval) {
3517 bus->console.count -= bus->console_interval;
3518 sdio_claim_host(bus->sdiodev->func[1]);
3519 /* Make sure backplane clock is on */
3520 brcmf_sdbrcm_bus_sleep(bus, false, false);
3521 if (brcmf_sdbrcm_readconsole(bus) < 0)
3522 /* stop on error */
3523 bus->console_interval = 0;
3524 sdio_release_host(bus->sdiodev->func[1]);
3525 }
3526 }
3527 #endif /* DEBUG */
3528
3529 /* On idle timeout clear activity flag and/or turn off clock */
3530 if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
3531 if (++bus->idlecount >= bus->idletime) {
3532 bus->idlecount = 0;
3533 if (bus->activity) {
3534 bus->activity = false;
3535 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3536 } else {
3537 brcmf_dbg(SDIO, "idle\n");
3538 sdio_claim_host(bus->sdiodev->func[1]);
3539 brcmf_sdbrcm_bus_sleep(bus, true, false);
3540 sdio_release_host(bus->sdiodev->func[1]);
3541 }
3542 }
3543 }
3544
3545 return (atomic_read(&bus->ipend) > 0);
3546 }
3547
3548 static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3549 {
3550 if (chipid == BCM43143_CHIP_ID)
3551 return true;
3552 if (chipid == BCM43241_CHIP_ID)
3553 return true;
3554 if (chipid == BCM4329_CHIP_ID)
3555 return true;
3556 if (chipid == BCM4330_CHIP_ID)
3557 return true;
3558 if (chipid == BCM4334_CHIP_ID)
3559 return true;
3560 if (chipid == BCM4335_CHIP_ID)
3561 return true;
3562 return false;
3563 }
3564
3565 static void brcmf_sdio_dataworker(struct work_struct *work)
3566 {
3567 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3568 datawork);
3569 struct list_head *cur_hd, *tmp_hd;
3570 unsigned long flags;
3571
3572 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3573 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
3574 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3575
3576 brcmf_sdbrcm_dpc(bus);
3577
3578 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3579 list_del(cur_hd);
3580 kfree(cur_hd);
3581 }
3582 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3583 }
3584
3585 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3586 {
3587 brcmf_dbg(TRACE, "Enter\n");
3588
3589 kfree(bus->rxbuf);
3590 bus->rxctl = bus->rxbuf = NULL;
3591 bus->rxlen = 0;
3592
3593 kfree(bus->databuf);
3594 bus->databuf = NULL;
3595 }
3596
3597 static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
3598 {
3599 brcmf_dbg(TRACE, "Enter\n");
3600
3601 if (bus->sdiodev->bus_if->maxctl) {
3602 bus->rxblen =
3603 roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
3604 ALIGNMENT) + BRCMF_SDALIGN;
3605 bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
3606 if (!(bus->rxbuf))
3607 goto fail;
3608 }
3609
3610 /* Allocate buffer to receive glomed packet */
3611 bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
3612 if (!(bus->databuf)) {
3613 /* release rxbuf which was already allocated above */
3614 kfree(bus->rxbuf);
3615 bus->rxbuf = NULL;
3616 goto fail;
3617 }
3618
3619 /* Align the buffer */
3620 if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
3621 bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
3622 ((unsigned long)bus->databuf % BRCMF_SDALIGN));
3623 else
3624 bus->dataptr = bus->databuf;
3625
3626 return true;
3627
3628 fail:
3629 return false;
3630 }
3631
3632 static bool
3633 brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3634 {
3635 u8 clkctl = 0;
3636 int err = 0;
3637 int reg_addr;
3638 u32 reg_val;
3639 u32 drivestrength;
3640
3641 bus->alp_only = true;
3642
3643 sdio_claim_host(bus->sdiodev->func[1]);
3644
3645 pr_debug("F1 signature read @0x18000000=0x%4x\n",
3646 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3647
3648 /*
3649 * Force PLL off until brcmf_sdio_chip_attach()
3650 * programs PLL control regs
3651 */
3652
3653 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3654 BRCMF_INIT_CLKCTL1, &err);
3655 if (!err)
3656 clkctl = brcmf_sdio_regrb(bus->sdiodev,
3657 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3658
3659 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3660 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3661 err, BRCMF_INIT_CLKCTL1, clkctl);
3662 goto fail;
3663 }
3664
3665 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
3666 brcmf_err("brcmf_sdio_chip_attach failed!\n");
3667 goto fail;
3668 }
3669
3670 if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
3671 brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
3672 goto fail;
3673 }
3674
3675 if (brcmf_sdbrcm_kso_init(bus)) {
3676 brcmf_err("error enabling KSO\n");
3677 goto fail;
3678 }
3679
3680 if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
3681 drivestrength = bus->sdiodev->pdata->drive_strength;
3682 else
3683 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3684 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3685
3686 /* Get info on the SOCRAM cores... */
3687 bus->ramsize = bus->ci->ramsize;
3688 if (!(bus->ramsize)) {
3689 brcmf_err("failed to find SOCRAM memory!\n");
3690 goto fail;
3691 }
3692
3693 /* Set card control so an SDIO card reset does a WLAN backplane reset */
3694 reg_val = brcmf_sdio_regrb(bus->sdiodev,
3695 SDIO_CCCR_BRCM_CARDCTRL, &err);
3696 if (err)
3697 goto fail;
3698
3699 reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
3700
3701 brcmf_sdio_regwb(bus->sdiodev,
3702 SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
3703 if (err)
3704 goto fail;
3705
3706 /* set PMUControl so a backplane reset does PMU state reload */
3707 reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
3708 pmucontrol);
3709 reg_val = brcmf_sdio_regrl(bus->sdiodev,
3710 reg_addr,
3711 &err);
3712 if (err)
3713 goto fail;
3714
3715 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
3716
3717 brcmf_sdio_regwl(bus->sdiodev,
3718 reg_addr,
3719 reg_val,
3720 &err);
3721 if (err)
3722 goto fail;
3723
3724
3725 sdio_release_host(bus->sdiodev->func[1]);
3726
3727 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
3728
3729 /* Locate an appropriately-aligned portion of hdrbuf */
3730 bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
3731 BRCMF_SDALIGN);
3732
3733 /* Set the poll and/or interrupt flags */
3734 bus->intr = true;
3735 bus->poll = false;
3736 if (bus->poll)
3737 bus->pollrate = 1;
3738
3739 return true;
3740
3741 fail:
3742 sdio_release_host(bus->sdiodev->func[1]);
3743 return false;
3744 }
3745
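/*
 * Second stage of probe: put the dongle into a known idle state. F2 is
 * disabled to flush any stale frame state, the bus is marked down, the
 * backplane clock is dropped to SD-only, and the idle, block-size and
 * save/restore defaults are initialised.
 */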
static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	sdio_claim_host(bus->sdiodev->func[1]);

	/* Disable F2 to clear any intermediate frame state on the dongle */
	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
			 SDIO_FUNC_ENABLE_1, NULL);

	bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
	bus->rxflow = false;

	/* Done with backplane-dependent accesses, can drop clock... */
	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);

	sdio_release_host(bus->sdiodev->func[1]);

	/* ...and initialize clock/power states */
	bus->clkstate = CLK_SDONLY;
	bus->idletime = BRCMF_IDLE_INTERVAL;
	bus->idleclock = BRCMF_IDLE_ACTIVE;

	/* Query the F2 block size, set roundup accordingly */
	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
	bus->roundup = min(max_roundup, bus->blocksize);

	/* bus module does not support packet chaining */
	bus->use_rxchain = false;
	bus->sd_rxchain = false;

	/* SR state */
	bus->sleeping = false;
	bus->sr_enabled = false;

	return true;
}

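/*
 * Watchdog thread: sleeps on watchdog_wait and, each time the timer
 * callback completes it, runs brcmf_sdbrcm_bus_watchdog() and counts the
 * tick. Exits when the wait is interrupted by a signal or when
 * kthread_stop() is called.
 */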
static int
brcmf_sdbrcm_watchdog_thread(void *data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;

	allow_signal(SIGTERM);
	/* Run until signal received */
	while (1) {
		if (kthread_should_stop())
			break;
		if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
			brcmf_sdbrcm_bus_watchdog(bus);
			/* Count the tick for reference */
			bus->sdcnt.tickcnt++;
		} else
			break;
	}
	return 0;
}

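/*
 * Timer callback: wakes the watchdog thread via the completion and, while
 * the timer is still marked valid, re-arms itself BRCMF_WD_POLL_MS
 * milliseconds ahead (BRCMF_WD_POLL_MS * HZ / 1000 converts that period
 * to jiffies).
 */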
static void
brcmf_sdbrcm_watchdog(unsigned long data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;

	if (bus->watchdog_tsk) {
		complete(&bus->watchdog_wait);
		/* Reschedule the watchdog */
		if (bus->wd_timer_valid)
			mod_timer(&bus->timer,
				  jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
	}
}

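/*
 * Release chip-level state: bring the backplane clock up and back off,
 * detach the chip description and free the firmware variable buffer.
 * A NULL bus->ci makes this a no-op.
 */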
static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus->ci) {
		sdio_claim_host(bus->sdiodev->func[1]);
		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
		sdio_release_host(bus->sdiodev->func[1]);
		brcmf_sdio_chip_detach(&bus->ci);
		if (bus->vars && bus->varsz)
			kfree(bus->vars);
		bus->vars = NULL;
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}

/* Detach and free everything */
static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus) {
		/* De-register interrupt handler */
		brcmf_sdio_intr_unregister(bus->sdiodev);

		cancel_work_sync(&bus->datawork);
		if (bus->brcmf_wq)
			destroy_workqueue(bus->brcmf_wq);

		if (bus->sdiodev->bus_if->drvr) {
			brcmf_detach(bus->sdiodev->dev);
			brcmf_sdbrcm_release_dongle(bus);
		}

		brcmf_sdbrcm_release_malloc(bus);

		kfree(bus);
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}

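/*
 * Callback table exported to the common driver layer; it maps the generic
 * bus operations (stop, init, tx data/control, rx control, tx queue lookup)
 * onto their SDIO-specific implementations.
 */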
static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
	.stop = brcmf_sdbrcm_bus_stop,
	.init = brcmf_sdbrcm_bus_init,
	.txdata = brcmf_sdbrcm_bus_txdata,
	.txctl = brcmf_sdbrcm_bus_txctl,
	.rxctl = brcmf_sdbrcm_bus_rxctl,
	.gettxq = brcmf_sdbrcm_bus_gettxq,
};

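/*
 * Main probe entry point for the SDIO bus layer. Order matters: the private
 * bus state and workqueue are created first, the dongle is attached
 * (probe_attach), locks, the watchdog timer/thread and the DPC task list
 * are initialised, the bus ops are registered with the common layer
 * (brcmf_attach), buffers are allocated (probe_malloc) and the dongle is
 * quiesced (probe_init) before firmware download and bring-up in
 * brcmf_bus_start(). Any failure unwinds through brcmf_sdbrcm_release().
 */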
void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
	int ret;
	struct brcmf_sdio *bus;
	struct brcmf_bus_dcmd *dlst;
	u32 dngl_txglom;
	u32 dngl_txglomalign;
	u8 idx;

	brcmf_dbg(TRACE, "Enter\n");

	/* We make an assumption about address window mappings:
	 * regsva == SI_ENUM_BASE
	 */

	/* Allocate private bus interface state */
	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
	if (!bus)
		goto fail;

	bus->sdiodev = sdiodev;
	sdiodev->bus = bus;
	skb_queue_head_init(&bus->glom);
	bus->txbound = BRCMF_TXBOUND;
	bus->rxbound = BRCMF_RXBOUND;
	bus->txminmax = BRCMF_TXMINMAX;
	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;

	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
	bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
	if (bus->brcmf_wq == NULL) {
		brcmf_err("insufficient memory to create txworkqueue\n");
		goto fail;
	}

	/* attempt to attach to the dongle */
	if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
		brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
		goto fail;
	}

	spin_lock_init(&bus->rxctl_lock);
	spin_lock_init(&bus->txqlock);
	init_waitqueue_head(&bus->ctrl_wait);
	init_waitqueue_head(&bus->dcmd_resp_wait);

	/* Set up the watchdog timer */
	init_timer(&bus->timer);
	bus->timer.data = (unsigned long)bus;
	bus->timer.function = brcmf_sdbrcm_watchdog;

	/* Initialize watchdog thread */
	init_completion(&bus->watchdog_wait);
	bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
					bus, "brcmf_watchdog");
	if (IS_ERR(bus->watchdog_tsk)) {
		pr_warn("brcmf_watchdog thread failed to start\n");
		bus->watchdog_tsk = NULL;
	}
	/* Initialize DPC task list and its lock */
	INIT_LIST_HEAD(&bus->dpc_tsklst);
	spin_lock_init(&bus->dpc_tl_lock);

	/* Assign bus interface callbacks */
	bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
	bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
	bus->sdiodev->bus_if->chip = bus->ci->chip;
	bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;

	/* Attach to the brcmf/OS/network interface */
	ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
	if (ret != 0) {
		brcmf_err("brcmf_attach failed\n");
		goto fail;
	}

	/* Allocate buffers */
	if (!(brcmf_sdbrcm_probe_malloc(bus))) {
		brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
		goto fail;
	}

	if (!(brcmf_sdbrcm_probe_init(bus))) {
		brcmf_err("brcmf_sdbrcm_probe_init failed\n");
		goto fail;
	}

	brcmf_sdio_debugfs_create(bus);
	brcmf_dbg(INFO, "completed!!\n");

	/* sdio bus core specific dcmd */
	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
	dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
	if (dlst) {
		if (bus->ci->c_inf[idx].rev < 12) {
			/* for sdio core rev < 12, disable txgloming */
			dngl_txglom = 0;
			dlst->name = "bus:txglom";
			dlst->param = (char *)&dngl_txglom;
			dlst->param_len = sizeof(u32);
		} else {
			/* otherwise, set txglomalign */
			dngl_txglomalign = bus->sdiodev->bus_if->align;
			dlst->name = "bus:txglomalign";
			dlst->param = (char *)&dngl_txglomalign;
			dlst->param_len = sizeof(u32);
		}
		list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
	}

	/* if firmware path present try to download and bring up bus */
	ret = brcmf_bus_start(bus->sdiodev->dev);
	if (ret != 0) {
		brcmf_err("dongle is not responding\n");
		goto fail;
	}

	return bus;

fail:
	brcmf_sdbrcm_release(bus);
	return NULL;
}

void brcmf_sdbrcm_disconnect(void *ptr)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;

	brcmf_dbg(TRACE, "Enter\n");

	if (bus)
		brcmf_sdbrcm_release(bus);

	brcmf_dbg(TRACE, "Disconnected\n");
}

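/*
 * Arm, re-arm or stop the watchdog timer. A wdtick of zero stops a running
 * timer. A non-zero wdtick (re)arms it to fire BRCMF_WD_POLL_MS milliseconds
 * from now: the timer is created afresh when the saved period does not match
 * BRCMF_WD_POLL_MS (e.g. on first use), otherwise it is simply re-armed with
 * mod_timer(). The timer is never started while the bus is still down, i.e.
 * before firmware has been loaded.
 */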
void
brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
	/* Totally stop the timer */
	if (!wdtick && bus->wd_timer_valid) {
		del_timer_sync(&bus->timer);
		bus->wd_timer_valid = false;
		bus->save_ms = wdtick;
		return;
	}

	/* don't start the wd until fw is loaded */
	if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
		return;

	if (wdtick) {
		if (bus->save_ms != BRCMF_WD_POLL_MS) {
			if (bus->wd_timer_valid)
				/* Stop timer and restart at new value */
				del_timer_sync(&bus->timer);

			/* Create the timer again when the watchdog period
			 * is changed dynamically or on first use
			 */
			bus->timer.expires =
				jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
			add_timer(&bus->timer);

		} else {
			/* Re-arm the timer at the last watchdog period */
			mod_timer(&bus->timer,
				  jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
		}

		bus->wd_timer_valid = true;
		bus->save_ms = wdtick;
	}
}
