// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2010 Broadcom Corporation
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "core.h"
#include "common.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK			0x03

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512
#define SDIO_4373_FUNC2_BLOCKSIZE	256
#define SDIO_435X_FUNC2_BLOCKSIZE	256
#define SDIO_4329_FUNC2_BLOCKSIZE	128
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY			3000

#define BRCMF_DEFAULT_RXGLOM_SIZE	32  /* max rx frames in glom chain */

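/* Tracks the bus worker threads across suspend/resume: while "freezing" is
 * set the threads park themselves on "thread_freeze" and are released again
 * through the "resumed" completion (see brcmf_sdiod_freezer_on/off below).
 */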
struct brcmf_sdiod_freezer {
	atomic_t freezing;
	atomic_t thread_count;
	u32 frozen_count;
	wait_queue_head_t thread_freeze;
	struct completion resumed;
};

static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus, true);

	return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus, false);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}

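/* Register the dongle interrupt: either a dedicated out-of-band GPIO line
 * (platform data provides the IRQ number) or the regular in-band SDIO
 * interrupt delivered through the MMC host controller.
 */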
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		spin_lock_init(&sdiodev->irq_en_lock);
		sdiodev->irq_en = true;

		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func1->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;

		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		disable_irq_wake(pdata->oob_irq_nr);

		sdio_claim_host(sdiodev->func1);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = brcmf_chip_enum_base(sdiodev->func1->device);
			addr = CORE_CC_REG(addr, gpiocontrol);
			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
					   0xf, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
			SDIO_CCCR_IEN_FUNC0;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
				     data, &ret);
		sdio_release_host(sdiodev->func1);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func1);
		sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = true;
	}

	return 0;
}

void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
		  sdiodev->oob_irq_requested,
		  sdiodev->sd_irq_requested);

	if (sdiodev->oob_irq_requested) {
		struct brcmfmac_sdio_pd *pdata;

		pdata = &sdiodev->settings->bus.sdio;
		sdio_claim_host(sdiodev->func1);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func1);

		sdiodev->oob_irq_requested = false;
		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
		sdiodev->irq_en = false;
		sdiodev->oob_irq_requested = false;
	}

	if (sdiodev->sd_irq_requested) {
		sdio_claim_host(sdiodev->func1);
		sdio_release_irq(sdiodev->func2);
		sdio_release_irq(sdiodev->func1);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = false;
	}
}

void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
			      enum brcmf_sdiod_state state)
{
	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
	    state == sdiodev->state)
		return;

	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
	switch (sdiodev->state) {
	case BRCMF_SDIOD_DATA:
		/* any other state means bus interface is down */
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
		break;
	case BRCMF_SDIOD_DOWN:
		/* transition from DOWN to DATA means bus interface is up */
		if (state == BRCMF_SDIOD_DATA)
			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
		break;
	default:
		break;
	}
	sdiodev->state = state;
}

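/* SDIO function 1 exposes only a 32 KiB window into the chip backplane.
 * Bits 31:8 of the backplane address are programmed into the three SBADDR
 * registers below and cached in sbwad so the window is only rewritten when
 * it actually moves.
 */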
static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
					    u32 addr)
{
	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
	int err = 0, i;

	if (bar0 == sdiodev->sbwad)
		return 0;

	v = bar0 >> 8;

	for (i = 0; i < 3 && !err; i++, v >>= 8)
		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
				   v & 0xff, &err);

	if (!err)
		sdiodev->sbwad = bar0;

	return err;
}

u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data = 0;
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	data = sdio_readl(sdiodev->func1, addr, &retval);

out:
	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
			u32 data, int *ret)
{
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	sdio_writel(sdiodev->func1, data, addr, &retval);

out:
	if (ret)
		*ret = retval;
}

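/* Function 1 transfers use an incrementing backplane address
 * (sdio_memcpy_fromio), while function 2 reads drain the WLAN data FIFO at
 * a fixed address (sdio_readsb).
 */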
static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
				   struct sdio_func *func, u32 addr,
				   struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* A single skb uses the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	switch (func->num) {
	case 1:
		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
					 req_sz);
		break;
	case 2:
		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
		break;
	default:
		/* bail out as things are really fishy here */
		WARN(1, "invalid sdio function number: %d\n", func->num);
		err = -ENOMEDIUM;
	}

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}

static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
				    struct sdio_func *func, u32 addr,
				    struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* A single skb uses the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}

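/* Build and submit one raw CMD53 (IO_RW_EXTENDED) block transfer. The
 * command argument encodes, from high to low bits: the R/W flag and
 * function number (set up by the caller), block mode, OP code (address
 * increment), the 17-bit register address and a 9-bit block count.
 */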
static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
			  struct mmc_command *mc, int sg_cnt, int req_sz,
			  int func_blk_sz, u32 *addr,
			  struct brcmf_sdio_dev *sdiodev,
			  struct sdio_func *func, int write)
{
	int ret;

	md->sg_len = sg_cnt;
	md->blocks = req_sz / func_blk_sz;
	mc->arg |= (*addr & 0x1FFFF) << 9;	/* address */
	mc->arg |= md->blocks & 0x1FF;		/* block count */
	/* incrementing addr for function 1 */
	if (func->num == 1)
		*addr += req_sz;

	mmc_set_data_timeout(md, func->card);
	mmc_wait_for_req(func->card->host, mr);

	ret = mc->error ? mc->error : md->error;
	if (ret == -ENOMEDIUM) {
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	} else if (ret != 0) {
		brcmf_err("CMD53 sg block %s failed %d\n",
			  write ? "write" : "read", ret);
		ret = -EIO;
	}

	return ret;
}

/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer head pointer
 *
 * This function takes the responsibility as the interface function to the
 * MMC stack for block data access. It assumes that the skb passed down by
 * the caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, src_offset, dst_offset;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff_head local_list, *target_list;
	struct sk_buff *pkt_next = NULL, *src;
	unsigned short max_seg_cnt;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1 << 31 : 0;	/* write flag */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

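	/* Walk the packet list, chopping each buffer into scatterlist entries
	 * no larger than the host segment size; submit a CMD53 whenever the
	 * accumulated request hits the host's request or segment limits.
	 */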
	req_sz = 0;
	sg_cnt = 0;
	sgl = sdiodev->sgtable.sgl;
	skb_queue_walk(target_list, pkt_next) {
		pkt_offset = 0;
		while (pkt_offset < pkt_next->len) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
				ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
						     sg_cnt, req_sz, func_blk_sz,
						     &addr, sdiodev, func, write);
				if (ret)
					goto exit_queue_walk;
				req_sz = 0;
				sg_cnt = 0;
				sgl = sdiodev->sgtable.sgl;
			}
		}
	}
	if (sg_cnt)
		ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
				     sg_cnt, req_sz, func_blk_sz,
				     &addr, sdiodev, func, write);
exit_queue_walk:
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		src = __skb_peek(&local_list);
		src_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;

			/* This is safe because we must have enough SKB data
			 * in the local list to cover everything in pktlist.
			 */
			while (1) {
				req_sz = pkt_next->len - dst_offset;
				if (req_sz > src->len - src_offset)
					req_sz = src->len - src_offset;

				orig_data = src->data + src_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);

				src_offset += req_sz;
				if (src_offset == src->len) {
					src_offset = 0;
					src = skb_peek_next(src, &local_list);
				}
				dst_offset += req_sz;
				if (dst_offset == pkt_next->len)
					break;
			}
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}

int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);

done:
	return err;
}

int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb = NULL;
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      __skb_peek(pktq));
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
					    addr, pktq);

done:
	brcmu_pkt_buf_free_skb(glom_skb);
	return err;
}

int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->cc_core->base;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);

	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
out:
	brcmu_pkt_buf_free_skb(mypkt);

	return err;
}

int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1 || !sdiodev->sg_support) {
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
						       addr, skb);
			if (err)
				break;
		}
	} else {
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
					    addr, pktq);
	}

	return err;
}

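/* Read or write dongle memory. Transfers are split into chunks that fit
 * inside the 32 KiB function 1 backplane window; the window is moved and
 * the in-window offset reset to zero for each subsequent chunk.
 */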
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int err = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func1);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
		if (err)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);

		if (write) {
			memcpy(pkt->data, data, dsize);
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
						       sdaddr, pkt);
		} else {
			err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
						      sdaddr, pkt);
		}

		if (err) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	sdio_release_host(sdiodev->func1);

	return err;
}

int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
	brcmf_dbg(SDIO, "Enter\n");

	/* Issue abort cmd52 command through F0 */
	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}

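/* Derive the transfer limits from the host controller capabilities and
 * preallocate the scatterlist used by brcmf_sdiod_sglist_rw(). The entry
 * count is taken from the larger of the rx and tx glomming depths, plus
 * roughly 1/16 extra entries as headroom.
 */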
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func2;
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}

static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return 0;

	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
		sdiodev->freezer = NULL;
	}
}

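/* Called on suspend: raise "freezing", kick the DPC so running threads
 * notice, wait until every registered thread has parked itself in
 * brcmf_sdiod_try_freeze(), then put the dongle to sleep.
 */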
static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func1);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func1);
	return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func1);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func1);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return IS_ENABLED(CONFIG_PM_SLEEP) &&
	       atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	if (IS_ENABLED(CONFIG_PM_SLEEP))
		atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	if (IS_ENABLED(CONFIG_PM_SLEEP))
		atomic_dec(&sdiodev->freezer->thread_count);
}

int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable functions 2 then 1. */
	sdio_claim_host(sdiodev->func1);
	sdio_disable_func(sdiodev->func2);
	sdio_disable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	pm_runtime_allow(sdiodev->func1->card->host->parent);
	return 0;
}

static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}

int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;

	sdio_claim_host(sdiodev->func1);

	ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func1);
		return ret;
	}
	switch (sdiodev->func2->device) {
	case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
		f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE;
		break;
	case SDIO_DEVICE_ID_BROADCOM_4359:
	case SDIO_DEVICE_ID_BROADCOM_4354:
	case SDIO_DEVICE_ID_BROADCOM_4356:
		f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE;
		break;
	case SDIO_DEVICE_ID_BROADCOM_4329:
		f2_blksz = SDIO_4329_FUNC2_BLOCKSIZE;
		break;
	default:
		break;
	}

	ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func1);
		return ret;
	} else {
		brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
	}

	/* increase F2 timeout */
	sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}

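/* Match table helpers: each entry pairs an SDIO vendor/device ID with the
 * firmware vendor enum used to select the matching firmware support code.
 */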
#define BRCMF_SDIO_DEVICE(dev_id, fw_vend) \
	{ \
		SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id), \
		.driver_data = BRCMF_FWVENDOR_ ## fw_vend \
	}

#define CYW_SDIO_DEVICE(dev_id, fw_vend) \
	{ \
		SDIO_DEVICE(SDIO_VENDOR_ID_CYPRESS, dev_id), \
		.driver_data = BRCMF_FWVENDOR_ ## fw_vend \
	}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43439, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359, WCC),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373, CYW),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012, CYW),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752, CYW),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359, CYW),
	CYW_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439, CYW),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

static void brcmf_sdiod_acpi_save_power_manageable(struct brcmf_sdio_dev *sdiodev)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(&sdiodev->func1->dev);
	if (adev)
		sdiodev->func1_power_manageable = adev->flags.power_manageable;

	adev = ACPI_COMPANION(&sdiodev->func2->dev);
	if (adev)
		sdiodev->func2_power_manageable = adev->flags.power_manageable;
#endif
}

static void brcmf_sdiod_acpi_set_power_manageable(struct brcmf_sdio_dev *sdiodev,
						  int enable)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(&sdiodev->func1->dev);
	if (adev)
		adev->flags.power_manageable = enable ? sdiodev->func1_power_manageable : 0;

	adev = ACPI_COMPANION(&sdiodev->func2->dev);
	if (adev)
		adev->flags.power_manageable = enable ? sdiodev->func2_power_manageable : 0;
#endif
}

static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;

	if (!id) {
		dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n",
			func->vendor, func->device);
		return -ENODEV;
	}

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func1 = func->card->sdio_func[0];
	sdiodev->func2 = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	bus_if->fwvid = id->driver_data;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
	sdiodev->dev = &sdiodev->func1->dev;

	brcmf_sdiod_acpi_save_power_manageable(sdiodev);
	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func1->dev, NULL);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}

static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		brcmf_sdiod_intr_unregister(sdiodev);

		if (func->num != 1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func1->dev, NULL);
		dev_set_drvdata(&sdiodev->func2->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}

void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(sdiodev->func1);

	/* Power must be preserved to be able to support WOWL. */
	if (!(pm_caps & MMC_PM_KEEP_POWER))
		goto notsup;

	if (sdiodev->settings->bus.sdio.oob_irq_supported ||
	    pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
		/* Stop ACPI from turning off the device when wowl is enabled */
		brcmf_sdiod_acpi_set_power_manageable(sdiodev, !enabled);
		sdiodev->wowl_enabled = enabled;
		brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
		return;
	}

notsup:
	brcmf_dbg(SDIO, "WOWL not supported\n");
}

static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;
	int ret = 0;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	if (sdiodev->wowl_enabled) {
		brcmf_sdiod_freezer_on(sdiodev);
		brcmf_sdio_wd_timer(sdiodev->bus, 0);

		sdio_flags = MMC_PM_KEEP_POWER;
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;

		if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
			brcmf_err("Failed to set pm_flags %x\n", sdio_flags);

	} else {
		/* power will be cut so remove device, probe again in resume */
		brcmf_sdiod_intr_unregister(sdiodev);
		ret = brcmf_sdiod_remove(sdiodev);
		if (ret)
			brcmf_err("Failed to remove device on suspend\n");
	}

	return ret;
}

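/* The driver is bound to both F1 and F2, so the suspend/resume callbacks run
 * once per function; the work is done for only one of them (F1 on suspend,
 * F2 on resume) so each transition is handled a single time.
 */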
static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
	int ret = 0;

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 2)
		return 0;

	if (!sdiodev->wowl_enabled) {
		/* bus was powered off and device removed, probe again */
		ret = brcmf_sdiod_probe(sdiodev);
		if (ret)
			brcmf_err("Failed to probe device on resume\n");
	} else {
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);

		brcmf_sdiod_freezer_off(sdiodev);
	}

	return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(brcmf_sdio_pm_ops,
				brcmf_ops_sdio_suspend,
				brcmf_ops_sdio_resume);

static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
		.pm = pm_sleep_ptr(&brcmf_sdio_pm_ops),
		.coredump = brcmf_dev_coredump,
	},
};

int brcmf_sdio_register(void)
{
	return sdio_register_driver(&brcmf_sdmmc_driver);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}