1 /*
2 * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
3 *
4 * Copyright (C) 1999-2013, Broadcom Corporation
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 * $Id: bcmsdh_sdmmc.c 418714 2013-08-16 13:21:09Z $
25 */
26 #include <typedefs.h>
27
28 #include <bcmdevs.h>
29 #include <bcmendian.h>
30 #include <bcmutils.h>
31 #include <osl.h>
32 #include <sdio.h> /* SDIO Device and Protocol Specs */
33 #include <sdioh.h> /* Standard SDIO Host Controller Specification */
34 #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
35 #include <sdiovar.h> /* ioctl/iovars */
36
37 #include <linux/mmc/core.h>
38 #include <linux/mmc/card.h>
39 #include <linux/mmc/sdio_func.h>
40 #include <linux/mmc/sdio_ids.h>
41
42 #include <dngl_stats.h>
43 #include <dhd.h>
44
45 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
46 #include <linux/suspend.h>
47 extern volatile bool dhd_mmc_suspend;
48 #endif
49 #include "bcmsdh_sdmmc.h"
50
51 #ifndef BCMSDH_MODULE
52 extern int sdio_function_init(void);
53 extern void sdio_function_cleanup(void);
54 #endif /* BCMSDH_MODULE */
55
56 #if !defined(OOB_INTR_ONLY)
57 static void IRQHandler(struct sdio_func *func);
58 static void IRQHandlerF2(struct sdio_func *func);
59 #endif /* !defined(OOB_INTR_ONLY) */
60 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
61 extern int sdio_reset_comm(struct mmc_card *card);
62
63 extern PBCMSDH_SDMMC_INSTANCE gInstance;
64
65 #define DEFAULT_SDIO_F2_BLKSIZE 512
66 #ifndef CUSTOM_SDIO_F2_BLKSIZE
67 #define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
68 #endif
69
70 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
71 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
72 uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
73
74 uint sd_power = 1; /* Default to SD Slot powered ON */
75 uint sd_clock = 1; /* Default to SD Clock turned ON */
76 uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
77 uint sd_msglevel = 0x01;
78 uint sd_use_dma = TRUE;
79
80 #ifdef BCMSDIOH_TXGLOM
81 #ifndef CUSTOM_TXGLOM
82 #define CUSTOM_TXGLOM 0
83 #endif
84 uint sd_txglom = CUSTOM_TXGLOM;
85 #endif /* BCMSDIOH_TXGLOM */
86
87 #ifndef CUSTOM_RXCHAIN
88 #define CUSTOM_RXCHAIN 0
89 #endif
90
91 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
92 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
93 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
94 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
95
96 #define DMA_ALIGN_MASK 0x03
97 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
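/*
 * DMA_ALIGN_MASK is used by sdioh_request_buffer() below to decide whether a
 * packet's data pointer is 32-bit aligned. A minimal sketch of that check,
 * assuming 'ptr' is an arbitrary buffer address:
 *
 *   uint8 *ptr;
 *   if ((ulong)ptr & DMA_ALIGN_MASK)
 *       ... not 4-byte aligned: data is copied into an aligned packet first ...
 *   else
 *       ... aligned: the buffer can be handed to the transfer path directly ...
 */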
98
99 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
100
101 static int
102 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
103 {
104 int err_ret;
105 uint32 fbraddr;
106 uint8 func;
107
108 sd_trace(("%s\n", __FUNCTION__));
109
110 /* Get the Card's common CIS address */
111 sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
112 sd->func_cis_ptr[0] = sd->com_cis_ptr;
113 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
114
115 /* Get the Card's function CIS (for each function) */
116 for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
117 func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
118 sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
119 sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
120 __FUNCTION__, func, sd->func_cis_ptr[func]));
121 }
122
123 sd->func_cis_ptr[0] = sd->com_cis_ptr;
124 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
125
126 /* Enable Function 1 */
127 sdio_claim_host(gInstance->func[1]);
128 err_ret = sdio_enable_func(gInstance->func[1]);
129 sdio_release_host(gInstance->func[1]);
130 if (err_ret) {
131 sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
132 }
133
134 return FALSE;
135 }
136
137 /*
138 * Public entry points & extern's
139 */
140 extern sdioh_info_t *
141 sdioh_attach(osl_t *osh, void *bar0, uint irq)
142 {
143 sdioh_info_t *sd;
144 int err_ret;
145
146 sd_trace(("%s\n", __FUNCTION__));
147
148 if (gInstance == NULL) {
149 sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
150 return NULL;
151 }
152
153 if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
154 sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
155 return NULL;
156 }
157 bzero((char *)sd, sizeof(sdioh_info_t));
158 sd->osh = osh;
159 if (sdioh_sdmmc_osinit(sd) != 0) {
160 sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
161 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
162 return NULL;
163 }
164
165 sd->num_funcs = 2;
166 sd->sd_blockmode = TRUE;
167 sd->use_client_ints = TRUE;
168 sd->client_block_size[0] = 64;
169 sd->use_rxchain = CUSTOM_RXCHAIN;
170
171 gInstance->sd = sd;
172
173 /* Claim host controller */
174 if (gInstance->func[1]) {
175 sdio_claim_host(gInstance->func[1]);
176
177 sd->client_block_size[1] = 64;
178 err_ret = sdio_set_block_size(gInstance->func[1], 64);
179 /* Release host controller F1 */
180 sdio_release_host(gInstance->func[1]);
181 if (err_ret) {
182 sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
183 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
184 return NULL;
185 }
186
187 } else {
188 sd_err(("%s:gInstance->func[1] is null\n", __FUNCTION__));
189 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
190 return NULL;
191 }
192
193 if (gInstance->func[2]) {
194 /* Claim host controller F2 */
195 sdio_claim_host(gInstance->func[2]);
196
197 sd->client_block_size[2] = sd_f2_blocksize;
198 err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
199 /* Release host controller F2 */
200 sdio_release_host(gInstance->func[2]);
201 if (err_ret) {
202 sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
203 sd_f2_blocksize));
204 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
205 return NULL;
206 }
207
208 } else {
209 sd_err(("%s:gInstance->func[2] is null\n", __FUNCTION__));
210 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
211 return NULL;
212 }
213
214 sdioh_sdmmc_card_enablefuncs(sd);
215
216 sd_trace(("%s: Done\n", __FUNCTION__));
217 return sd;
218 }
219
220
221 extern SDIOH_API_RC
222 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
223 {
224 sd_trace(("%s\n", __FUNCTION__));
225
226 if (sd) {
227
228 /* Disable Function 2 */
229 sdio_claim_host(gInstance->func[2]);
230 sdio_disable_func(gInstance->func[2]);
231 sdio_release_host(gInstance->func[2]);
232
233 /* Disable Function 1 */
234 if (gInstance->func[1]) {
235 sdio_claim_host(gInstance->func[1]);
236 sdio_disable_func(gInstance->func[1]);
237 sdio_release_host(gInstance->func[1]);
238 }
239
240 gInstance->func[1] = NULL;
241 gInstance->func[2] = NULL;
242
243 /* deregister irq */
244 sdioh_sdmmc_osfree(sd);
245
246 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
247 }
248 return SDIOH_API_RC_SUCCESS;
249 }
250
251 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
252
253 extern SDIOH_API_RC
254 sdioh_enable_func_intr(void)
255 {
256 uint8 reg;
257 int err;
258
259 if (gInstance->func[0]) {
260 sdio_claim_host(gInstance->func[0]);
261
262 reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
263 if (err) {
264 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
265 sdio_release_host(gInstance->func[0]);
266 return SDIOH_API_RC_FAIL;
267 }
268
269 /* Enable F1 and F2 interrupts, clear master enable */
270 reg &= ~INTR_CTL_MASTER_EN;
271 reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
272 sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
273 sdio_release_host(gInstance->func[0]);
274
275 if (err) {
276 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
277 return SDIOH_API_RC_FAIL;
278 }
279 }
280
281 return SDIOH_API_RC_SUCCESS;
282 }
283
284 extern SDIOH_API_RC
285 sdioh_disable_func_intr(void)
286 {
287 uint8 reg;
288 int err;
289
290 if (gInstance->func[0]) {
291 sdio_claim_host(gInstance->func[0]);
292 reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
293 if (err) {
294 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
295 sdio_release_host(gInstance->func[0]);
296 return SDIOH_API_RC_FAIL;
297 }
298
299 reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
300 /* Disable master interrupt with the last function interrupt */
301 if (!(reg & 0xFE))
302 reg = 0;
303 sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
304
305 sdio_release_host(gInstance->func[0]);
306 if (err) {
307 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
308 return SDIOH_API_RC_FAIL;
309 }
310 }
311 return SDIOH_API_RC_SUCCESS;
312 }
313 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
314
315 /* Configure callback to client when we receive client interrupt */
316 extern SDIOH_API_RC
317 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
318 {
319 sd_trace(("%s: Entering\n", __FUNCTION__));
320 if (fn == NULL) {
321 sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
322 return SDIOH_API_RC_FAIL;
323 }
324 #if !defined(OOB_INTR_ONLY)
325 sd->intr_handler = fn;
326 sd->intr_handler_arg = argh;
327 sd->intr_handler_valid = TRUE;
328
329 /* register and unmask irq */
330 if (gInstance->func[2]) {
331 sdio_claim_host(gInstance->func[2]);
332 sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
333 sdio_release_host(gInstance->func[2]);
334 }
335
336 if (gInstance->func[1]) {
337 sdio_claim_host(gInstance->func[1]);
338 sdio_claim_irq(gInstance->func[1], IRQHandler);
339 sdio_release_host(gInstance->func[1]);
340 }
341 #elif defined(HW_OOB)
342 sdioh_enable_func_intr();
343 #endif /* !defined(OOB_INTR_ONLY) */
344
345 return SDIOH_API_RC_SUCCESS;
346 }
347
348 extern SDIOH_API_RC
349 sdioh_interrupt_deregister(sdioh_info_t *sd)
350 {
351 sd_trace(("%s: Entering\n", __FUNCTION__));
352
353 #if !defined(OOB_INTR_ONLY)
354 if (gInstance->func[1]) {
355 /* register and unmask irq */
356 sdio_claim_host(gInstance->func[1]);
357 sdio_release_irq(gInstance->func[1]);
358 sdio_release_host(gInstance->func[1]);
359 }
360
361 if (gInstance->func[2]) {
362 /* Claim host controller F2 */
363 sdio_claim_host(gInstance->func[2]);
364 sdio_release_irq(gInstance->func[2]);
365 /* Release host controller F2 */
366 sdio_release_host(gInstance->func[2]);
367 }
368
369 sd->intr_handler_valid = FALSE;
370 sd->intr_handler = NULL;
371 sd->intr_handler_arg = NULL;
372 #elif defined(HW_OOB)
373 sdioh_disable_func_intr();
374 #endif /* !defined(OOB_INTR_ONLY) */
375 return SDIOH_API_RC_SUCCESS;
376 }
377
378 extern SDIOH_API_RC
379 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
380 {
381 sd_trace(("%s: Entering\n", __FUNCTION__));
382 *onoff = sd->client_intr_enabled;
383 return SDIOH_API_RC_SUCCESS;
384 }
385
386 #if defined(DHD_DEBUG)
387 extern bool
388 sdioh_interrupt_pending(sdioh_info_t *sd)
389 {
390 return (0);
391 }
392 #endif
393
394 uint
395 sdioh_query_iofnum(sdioh_info_t *sd)
396 {
397 return sd->num_funcs;
398 }
399
400 /* IOVar table */
401 enum {
402 IOV_MSGLEVEL = 1,
403 IOV_BLOCKMODE,
404 IOV_BLOCKSIZE,
405 IOV_DMA,
406 IOV_USEINTS,
407 IOV_NUMINTS,
408 IOV_NUMLOCALINTS,
409 IOV_HOSTREG,
410 IOV_DEVREG,
411 IOV_DIVISOR,
412 IOV_SDMODE,
413 IOV_HISPEED,
414 IOV_HCIREGS,
415 IOV_POWER,
416 IOV_CLOCK,
417 IOV_RXCHAIN
418 };
419
420 const bcm_iovar_t sdioh_iovars[] = {
421 {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
422 {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
423 {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
424 {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
425 {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
426 {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
427 {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
428 {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
429 {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
430 {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
431 {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
432 {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
433 {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
434 {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 },
435 {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 },
436 {NULL, 0, 0, 0, 0 }
437 };
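/*
 * Example of the "sd_blocksize" iovar encoding noted above: the 32-bit value
 * packs the function number in the upper 16 bits and the block size in the
 * lower 16 bits, so setting function 2 to a 512-byte block size would use a
 * value like (hypothetical, for illustration only):
 *
 *   uint32 val = (2 << 16) | 512;
 *
 * The IOV_SVAL(IOV_BLOCKSIZE) handler below unpacks it the same way:
 *   func    = (uint32)int_val >> 16;
 *   blksize = (uint16)int_val;
 */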
438
439 int
440 sdioh_iovar_op(sdioh_info_t *si, const char *name,
441 void *params, int plen, void *arg, int len, bool set)
442 {
443 const bcm_iovar_t *vi = NULL;
444 int bcmerror = 0;
445 int val_size;
446 int32 int_val = 0;
447 bool bool_val;
448 uint32 actionid;
449
450 ASSERT(name);
451 ASSERT(len >= 0);
452
453 /* Get must have return space; Set does not take qualifiers */
454 ASSERT(set || (arg && len));
455 ASSERT(!set || (!params && !plen));
456
457 sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
458
459 if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
460 bcmerror = BCME_UNSUPPORTED;
461 goto exit;
462 }
463
464 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
465 goto exit;
466
467 /* Set up params so get and set can share the convenience variables */
468 if (params == NULL) {
469 params = arg;
470 plen = len;
471 }
472
473 if (vi->type == IOVT_VOID)
474 val_size = 0;
475 else if (vi->type == IOVT_BUFFER)
476 val_size = len;
477 else
478 val_size = sizeof(int);
479
480 if (plen >= (int)sizeof(int_val))
481 bcopy(params, &int_val, sizeof(int_val));
482
483 bool_val = (int_val != 0) ? TRUE : FALSE;
484 BCM_REFERENCE(bool_val);
485
486 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
487 switch (actionid) {
488 case IOV_GVAL(IOV_MSGLEVEL):
489 int_val = (int32)sd_msglevel;
490 bcopy(&int_val, arg, val_size);
491 break;
492
493 case IOV_SVAL(IOV_MSGLEVEL):
494 sd_msglevel = int_val;
495 break;
496
497 case IOV_GVAL(IOV_BLOCKMODE):
498 int_val = (int32)si->sd_blockmode;
499 bcopy(&int_val, arg, val_size);
500 break;
501
502 case IOV_SVAL(IOV_BLOCKMODE):
503 si->sd_blockmode = (bool)int_val;
504 /* Haven't figured out how to make non-block mode with DMA */
505 break;
506
507 case IOV_GVAL(IOV_BLOCKSIZE):
508 if ((uint32)int_val > si->num_funcs) {
509 bcmerror = BCME_BADARG;
510 break;
511 }
512 int_val = (int32)si->client_block_size[int_val];
513 bcopy(&int_val, arg, val_size);
514 break;
515
516 case IOV_SVAL(IOV_BLOCKSIZE):
517 {
518 uint func = ((uint32)int_val >> 16);
519 uint blksize = (uint16)int_val;
520 uint maxsize;
521
522 if (func > si->num_funcs) {
523 bcmerror = BCME_BADARG;
524 break;
525 }
526
527 switch (func) {
528 case 0: maxsize = 32; break;
529 case 1: maxsize = BLOCK_SIZE_4318; break;
530 case 2: maxsize = BLOCK_SIZE_4328; break;
531 default: maxsize = 0;
532 }
533 if (blksize > maxsize) {
534 bcmerror = BCME_BADARG;
535 break;
536 }
537 if (!blksize) {
538 blksize = maxsize;
539 }
540
541 /* Now set it */
542 si->client_block_size[func] = blksize;
543
544 break;
545 }
546
547 case IOV_GVAL(IOV_RXCHAIN):
548 int_val = (int32)si->use_rxchain;
549 bcopy(&int_val, arg, val_size);
550 break;
551
552 case IOV_GVAL(IOV_DMA):
553 int_val = (int32)si->sd_use_dma;
554 bcopy(&int_val, arg, val_size);
555 break;
556
557 case IOV_SVAL(IOV_DMA):
558 si->sd_use_dma = (bool)int_val;
559 break;
560
561 case IOV_GVAL(IOV_USEINTS):
562 int_val = (int32)si->use_client_ints;
563 bcopy(&int_val, arg, val_size);
564 break;
565
566 case IOV_SVAL(IOV_USEINTS):
567 si->use_client_ints = (bool)int_val;
568 if (si->use_client_ints)
569 si->intmask |= CLIENT_INTR;
570 else
571 si->intmask &= ~CLIENT_INTR;
572
573 break;
574
575 case IOV_GVAL(IOV_DIVISOR):
576 int_val = (uint32)sd_divisor;
577 bcopy(&int_val, arg, val_size);
578 break;
579
580 case IOV_SVAL(IOV_DIVISOR):
581 sd_divisor = int_val;
582 break;
583
584 case IOV_GVAL(IOV_POWER):
585 int_val = (uint32)sd_power;
586 bcopy(&int_val, arg, val_size);
587 break;
588
589 case IOV_SVAL(IOV_POWER):
590 sd_power = int_val;
591 break;
592
593 case IOV_GVAL(IOV_CLOCK):
594 int_val = (uint32)sd_clock;
595 bcopy(&int_val, arg, val_size);
596 break;
597
598 case IOV_SVAL(IOV_CLOCK):
599 sd_clock = int_val;
600 break;
601
602 case IOV_GVAL(IOV_SDMODE):
603 int_val = (uint32)sd_sdmode;
604 bcopy(&int_val, arg, val_size);
605 break;
606
607 case IOV_SVAL(IOV_SDMODE):
608 sd_sdmode = int_val;
609 break;
610
611 case IOV_GVAL(IOV_HISPEED):
612 int_val = (uint32)sd_hiok;
613 bcopy(&int_val, arg, val_size);
614 break;
615
616 case IOV_SVAL(IOV_HISPEED):
617 sd_hiok = int_val;
618 break;
619
620 case IOV_GVAL(IOV_NUMINTS):
621 int_val = (int32)si->intrcount;
622 bcopy(&int_val, arg, val_size);
623 break;
624
625 case IOV_GVAL(IOV_NUMLOCALINTS):
626 int_val = (int32)0;
627 bcopy(&int_val, arg, val_size);
628 break;
629
630 case IOV_GVAL(IOV_HOSTREG):
631 {
632 sdreg_t *sd_ptr = (sdreg_t *)params;
633
634 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
635 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
636 bcmerror = BCME_BADARG;
637 break;
638 }
639
640 sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
641 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
642 sd_ptr->offset));
643 if (sd_ptr->offset & 1)
644 int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
645 else if (sd_ptr->offset & 2)
646 int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
647 else
648 int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
649
650 bcopy(&int_val, arg, sizeof(int_val));
651 break;
652 }
653
654 case IOV_SVAL(IOV_HOSTREG):
655 {
656 sdreg_t *sd_ptr = (sdreg_t *)params;
657
658 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
659 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
660 bcmerror = BCME_BADARG;
661 break;
662 }
663
664 sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
665 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
666 sd_ptr->offset));
667 break;
668 }
669
670 case IOV_GVAL(IOV_DEVREG):
671 {
672 sdreg_t *sd_ptr = (sdreg_t *)params;
673 uint8 data = 0;
674
675 if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
676 bcmerror = BCME_SDIO_ERROR;
677 break;
678 }
679
680 int_val = (int)data;
681 bcopy(&int_val, arg, sizeof(int_val));
682 break;
683 }
684
685 case IOV_SVAL(IOV_DEVREG):
686 {
687 sdreg_t *sd_ptr = (sdreg_t *)params;
688 uint8 data = (uint8)sd_ptr->value;
689
690 if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
691 bcmerror = BCME_SDIO_ERROR;
692 break;
693 }
694 break;
695 }
696
697 default:
698 bcmerror = BCME_UNSUPPORTED;
699 break;
700 }
701 exit:
702
703 return bcmerror;
704 }
705
706 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
707
708 SDIOH_API_RC
709 sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
710 {
711 SDIOH_API_RC status;
712 uint8 data;
713
714 if (enable)
715 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
716 else
717 data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
718
719 status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
720 return status;
721 }
722 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
723
724 extern SDIOH_API_RC
725 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
726 {
727 SDIOH_API_RC status;
728 /* No lock needed since sdioh_request_byte does locking */
729 status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
730 return status;
731 }
732
733 extern SDIOH_API_RC
734 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
735 {
736 /* No lock needed since sdioh_request_byte does locking */
737 SDIOH_API_RC status;
738 status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
739 return status;
740 }
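/*
 * Minimal usage sketch (illustrative only, not called from this file): read a
 * CCCR register with the byte helper, modify it, and write it back. 'sd' is the
 * handle returned by sdioh_attach(); the register and bit names below are the
 * ones already used elsewhere in this file.
 *
 *   uint8 val;
 *   if (sdioh_cfg_read(sd, 0, SDIOD_CCCR_INTEN, &val) == SDIOH_API_RC_SUCCESS) {
 *       val |= INTR_CTL_MASTER_EN;
 *       (void)sdioh_cfg_write(sd, 0, SDIOD_CCCR_INTEN, &val);
 *   }
 */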
741
742 static int
743 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
744 {
745 /* read 24 bits and return valid 17 bit addr */
746 int i;
747 uint32 scratch, regdata;
748 uint8 *ptr = (uint8 *)&scratch;
749 for (i = 0; i < 3; i++) {
750 if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
751 sd_err(("%s: Can't read!\n", __FUNCTION__));
752
753 *ptr++ = (uint8) regdata;
754 regaddr++;
755 }
756
757 /* Only the lower 17-bits are valid */
758 scratch = ltoh32(scratch);
759 scratch &= 0x0001FFFF;
760 return (scratch);
761 }
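/*
 * Worked example of the assembly above (byte values are illustrative only):
 * three CMD52 reads at regaddr, regaddr+1 and regaddr+2 return 0x09, 0x10 and
 * 0x00. Stored in increasing memory order they form the little-endian value
 * 0x00001009; ltoh32() converts it to host order and the 0x0001FFFF mask drops
 * everything above the 17 valid address bits, so the returned CIS pointer is
 * 0x1009.
 */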
762
763 extern SDIOH_API_RC
764 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
765 {
766 uint32 count;
767 int offset;
768 uint32 foo;
769 uint8 *cis = cisd;
770
771 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
772
773 if (!sd->func_cis_ptr[func]) {
774 bzero(cis, length);
775 sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
776 return SDIOH_API_RC_FAIL;
777 }
778
779 sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
780
781 for (count = 0; count < length; count++) {
782 offset = sd->func_cis_ptr[func] + count;
783 if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
784 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
785 return SDIOH_API_RC_FAIL;
786 }
787
788 *cis = (uint8)(foo & 0xff);
789 cis++;
790 }
791
792 return SDIOH_API_RC_SUCCESS;
793 }
794
795 extern SDIOH_API_RC
796 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
797 {
798 int err_ret = 0;
799 #if defined(MMC_SDIO_ABORT)
800 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
801 #endif
802
803 sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
804
805 DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
806 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
807 if(rw) { /* CMD52 Write */
808 if (func == 0) {
809 /* Can only directly write to some F0 registers. Handle F2 enable
810 * as a special case.
811 */
812 if (regaddr == SDIOD_CCCR_IOEN) {
813 if (gInstance->func[2]) {
814 sdio_claim_host(gInstance->func[2]);
815 if (*byte & SDIO_FUNC_ENABLE_2) {
816 /* Enable Function 2 */
817 err_ret = sdio_enable_func(gInstance->func[2]);
818 if (err_ret) {
819 sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
820 err_ret));
821 }
822 } else {
823 /* Disable Function 2 */
824 err_ret = sdio_disable_func(gInstance->func[2]);
825 if (err_ret) {
826 sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
827 err_ret));
828 }
829 }
830 sdio_release_host(gInstance->func[2]);
831 }
832 }
833 #if defined(MMC_SDIO_ABORT)
834 /* to allow abort command through F1 */
835 else if (regaddr == SDIOD_CCCR_IOABORT) {
836 while (sdio_abort_retry--) {
837 if (gInstance->func[func]) {
838 sdio_claim_host(gInstance->func[func]);
839 /*
840 * this sdio_f0_writeb() can be replaced with
841 * another api depending upon MMC driver change.
842 * As of this time, this is a temporary one
843 */
844 sdio_writeb(gInstance->func[func],
845 *byte, regaddr, &err_ret);
846 sdio_release_host(gInstance->func[func]);
847 }
848 if (!err_ret)
849 break;
850 }
851 }
852 #endif /* MMC_SDIO_ABORT */
853 else if (regaddr < 0xF0) {
854 sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
855 } else {
856 /* Claim host controller, perform F0 write, and release */
857 if (gInstance->func[func]) {
858 sdio_claim_host(gInstance->func[func]);
859 sdio_f0_writeb(gInstance->func[func],
860 *byte, regaddr, &err_ret);
861 sdio_release_host(gInstance->func[func]);
862 }
863 }
864 } else {
865 /* Claim host controller, perform Fn write, and release */
866 if (gInstance->func[func]) {
867 sdio_claim_host(gInstance->func[func]);
868 sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
869 sdio_release_host(gInstance->func[func]);
870 }
871 }
872 } else { /* CMD52 Read */
873 /* Claim host controller, perform Fn read, and release */
874 if (gInstance->func[func]) {
875 sdio_claim_host(gInstance->func[func]);
876 if (func == 0) {
877 *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
878 } else {
879 *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
880 }
881 sdio_release_host(gInstance->func[func]);
882 }
883 }
884
885 if (err_ret) {
886 if ((regaddr == 0x1001F) && (err_ret == -110)) {
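/* Intentionally ignored: a -110 (-ETIMEDOUT) result on register 0x1001F
 * is treated as expected (it typically occurs while the chip is asleep)
 * and is not logged as an error.
 */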
887 } else {
888 sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
889 rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
890 }
891 }
892
893 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
894 }
895
896 extern SDIOH_API_RC
897 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
898 uint32 *word, uint nbytes)
899 {
900 int err_ret = SDIOH_API_RC_FAIL;
901 #if defined(MMC_SDIO_ABORT)
902 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
903 #endif
904
905 if (func == 0) {
906 sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
907 return SDIOH_API_RC_FAIL;
908 }
909
910 sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
911 __FUNCTION__, cmd_type, rw, func, addr, nbytes));
912
913 DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
914 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
915 /* Claim host controller */
916 sdio_claim_host(gInstance->func[func]);
917
918 if(rw) { /* CMD52 Write */
919 if (nbytes == 4) {
920 sdio_writel(gInstance->func[func], *word, addr, &err_ret);
921 } else if (nbytes == 2) {
922 sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
923 } else {
924 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
925 }
926 } else { /* CMD52 Read */
927 if (nbytes == 4) {
928 *word = sdio_readl(gInstance->func[func], addr, &err_ret);
929 } else if (nbytes == 2) {
930 *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
931 } else {
932 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
933 }
934 }
935
936 /* Release host controller */
937 sdio_release_host(gInstance->func[func]);
938
939 if (err_ret) {
940 #if defined(MMC_SDIO_ABORT)
941 /* Any error on CMD53 transaction should abort that function using function 0. */
942 while (sdio_abort_retry--) {
943 if (gInstance->func[0]) {
944 sdio_claim_host(gInstance->func[0]);
945 /*
946 * this sdio_f0_writeb() can be replaced with another api
947 * depending upon MMC driver change.
948 * As of this time, this is a temporary one
949 */
950 sdio_writeb(gInstance->func[0],
951 func, SDIOD_CCCR_IOABORT, &err_ret);
952 sdio_release_host(gInstance->func[0]);
953 }
954 if (!err_ret)
955 break;
956 }
957 if (err_ret)
958 #endif /* MMC_SDIO_ABORT */
959 {
960 sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
961 rw ? "Write" : "Read", err_ret));
962 }
963 }
964
965 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
966 }
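/*
 * Minimal usage sketch (illustrative only; real callers live elsewhere in the
 * DHD/bcmsdh layers): a 4-byte read through the word helper above. 'addr' is a
 * hypothetical register offset; cmd_type is unused by this implementation and
 * is passed as 0, as the in-file caller sdioh_sdmmc_card_regread() does.
 *
 *   uint32 val = 0;
 *   if (sdioh_request_word(sd, 0, SDIOH_READ, 1, addr, &val, 4) ==
 *           SDIOH_API_RC_SUCCESS)
 *       sd_info(("reg 0x%05x = 0x%08x\n", addr, val));
 */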
967
968 #ifdef BCMSDIOH_TXGLOM
969 void
970 sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
971 {
972 void *phead = sd->glom_info.glom_pkt_head;
973 void *ptail = sd->glom_info.glom_pkt_tail;
974
975 BCM_REFERENCE(frame);
976
977 ASSERT(!PKTLINK(pkt));
978 if (!phead) {
979 ASSERT(!phead);
980 sd->glom_info.glom_pkt_head = sd->glom_info.glom_pkt_tail = pkt;
981 }
982 else {
983 ASSERT(ptail);
984 PKTSETNEXT(sd->osh, ptail, pkt);
985 sd->glom_info.glom_pkt_tail = pkt;
986 }
987 sd->glom_info.count++;
988 }
989
990 void
991 sdioh_glom_clear(sdioh_info_t *sd)
992 {
993 void *pnow, *pnext;
994
995 pnext = sd->glom_info.glom_pkt_head;
996
997 if (!pnext) {
998 sd_err(("sdioh_glom_clear: no first packet to clear!\n"));
999 return;
1000 }
1001
1002 while (pnext) {
1003 pnow = pnext;
1004 pnext = PKTNEXT(sd->osh, pnow);
1005 PKTSETNEXT(sd->osh, pnow, NULL);
1006 sd->glom_info.count--;
1007 }
1008
1009 sd->glom_info.glom_pkt_head = NULL;
1010 sd->glom_info.glom_pkt_tail = NULL;
1011 if (sd->glom_info.count != 0) {
1012 sd_err(("sdioh_glom_clear: glom count mismatch!\n"));
1013 sd->glom_info.count = 0;
1014 }
1015 }
1016
1017 uint
1018 sdioh_set_mode(sdioh_info_t *sd, uint mode)
1019 {
1020 if (mode == SDPCM_TXGLOM_CPY)
1021 sd->txglom_mode = mode;
1022 else if (mode == SDPCM_TXGLOM_MDESC)
1023 sd->txglom_mode = mode;
1024
1025 return (sd->txglom_mode);
1026 }
1027
1028 bool
1029 sdioh_glom_enabled(void)
1030 {
1031 return sd_txglom;
1032 }
1033 #endif /* BCMSDIOH_TXGLOM */
1034
1035 static INLINE int sdioh_request_packet_align(uint pkt_len, uint write, uint func, int blk_size)
1036 {
1037 /* Align Patch */
1038 if (!write || pkt_len < 32)
1039 pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
1040 else if ((pkt_len > blk_size) && (pkt_len % blk_size)) {
1041 if (func == SDIO_FUNC_2) {
1042 sd_err(("%s: [%s] dhd_sdio must align %d bytes"
1043 " packet larger than a %d bytes blk size by a blk size\n",
1044 __FUNCTION__, write ? "W" : "R", pkt_len, blk_size));
1045 }
1046 pkt_len += blk_size - (pkt_len % blk_size);
1047 }
1048 #ifdef CONFIG_MMC_MSM7X00A
1049 if ((pkt_len % 64) == 32) {
1050 sd_err(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
1051 pkt_len += 32;
1052 }
1053 #endif /* CONFIG_MMC_MSM7X00A */
1054 return pkt_len;
1055 }
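/*
 * Illustrative results of the alignment rules above (values chosen as examples
 * only):
 *  - read of 13 bytes, any block size:       rounded up to 16 (4-byte alignment)
 *  - write of 20 bytes (< 32), blk_size 512: stays 20, already a multiple of 4
 *  - write of 1030 bytes, blk_size 512:      1030 > 512 and not a multiple of
 *    512, so it is padded to 1536 (3 full blocks)
 */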
1056
1057 static SDIOH_API_RC
1058 sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1059 uint addr, void *pkt)
1060 {
1061 bool fifo = (fix_inc == SDIOH_DATA_FIX);
1062 uint32 SGCount = 0;
1063 int err_ret = 0;
1064 void *pnext;
1065 uint ttl_len, dma_len, lft_len, xfred_len, pkt_len;
1066 uint blk_num;
1067 int blk_size;
1068 struct mmc_request mmc_req;
1069 struct mmc_command mmc_cmd;
1070 struct mmc_data mmc_dat;
1071 #ifdef BCMSDIOH_TXGLOM
1072 uint8 *localbuf = NULL;
1073 uint local_plen = 0;
1074 bool need_txglom = write && sdioh_glom_enabled() &&
1075 (pkt == sd->glom_info.glom_pkt_tail) &&
1076 (sd->glom_info.glom_pkt_head != sd->glom_info.glom_pkt_tail);
1077 #endif /* BCMSDIOH_TXGLOM */
1078
1079 sd_trace(("%s: Enter\n", __FUNCTION__));
1080
1081 ASSERT(pkt);
1082 DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
1083 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1084
1085 ttl_len = xfred_len = 0;
1086 #ifdef BCMSDIOH_TXGLOM
1087 if (need_txglom) {
1088 pkt = sd->glom_info.glom_pkt_head;
1089 }
1090 #endif /* BCMSDIOH_TXGLOM */
1091
1092 /* at least 4 bytes alignment of skb buff is guaranteed */
1093 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext))
1094 ttl_len += PKTLEN(sd->osh, pnext);
1095
1096 blk_size = sd->client_block_size[func];
1097 if (((!write && sd->use_rxchain) ||
1098 #ifdef BCMSDIOH_TXGLOM
1099 (need_txglom && sd->txglom_mode == SDPCM_TXGLOM_MDESC) ||
1100 #endif
1101 0) && (ttl_len >= blk_size)) {
1102 blk_num = ttl_len / blk_size;
1103 dma_len = blk_num * blk_size;
1104 } else {
1105 blk_num = 0;
1106 dma_len = 0;
1107 }
1108
1109 lft_len = ttl_len - dma_len;
1110
1111 sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n",
1112 __FUNCTION__, write ? "W" : "R",
1113 ttl_len, func, addr, blk_num, lft_len));
1114
1115 if (0 != dma_len) {
1116 memset(&mmc_req, 0, sizeof(struct mmc_request));
1117 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
1118 memset(&mmc_dat, 0, sizeof(struct mmc_data));
1119
1120 /* Set up DMA descriptors */
1121 for (pnext = pkt;
1122 pnext && dma_len;
1123 pnext = PKTNEXT(sd->osh, pnext)) {
1124 pkt_len = PKTLEN(sd->osh, pnext);
1125
1126 if (dma_len > pkt_len)
1127 dma_len -= pkt_len;
1128 else {
1129 pkt_len = xfred_len = dma_len;
1130 dma_len = 0;
1131 pkt = pnext;
1132 }
1133
1134 sg_set_buf(&sd->sg_list[SGCount++],
1135 (uint8*)PKTDATA(sd->osh, pnext),
1136 pkt_len);
1137
1138 if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) {
1139 sd_err(("%s: sg list entries exceed limit\n",
1140 __FUNCTION__));
1141 return (SDIOH_API_RC_FAIL);
1142 }
1143 }
1144
1145 mmc_dat.sg = sd->sg_list;
1146 mmc_dat.sg_len = SGCount;
1147 mmc_dat.blksz = blk_size;
1148 mmc_dat.blocks = blk_num;
1149 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
1150
1151 mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
1152 mmc_cmd.arg = write ? 1<<31 : 0;
1153 mmc_cmd.arg |= (func & 0x7) << 28;
1154 mmc_cmd.arg |= 1<<27;
1155 mmc_cmd.arg |= fifo ? 0 : 1<<26;
1156 mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
1157 mmc_cmd.arg |= blk_num & 0x1FF;
1158 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
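/* The argument assembled above follows the CMD53 (IO_RW_EXTENDED) layout:
 * bit 31 = R/W flag, bits 30:28 = function number, bit 27 = block mode,
 * bit 26 = OP code (incrementing address when set), bits 25:9 = register
 * address, bits 8:0 = block count. For example (illustrative values), a
 * block-mode write of 8 blocks to function 2 at address 0x08000 would be
 *   0x80000000 | (2 << 28) | (1 << 27) | (1 << 26) | (0x08000 << 9) | 8.
 */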
1159
1160 mmc_req.cmd = &mmc_cmd;
1161 mmc_req.data = &mmc_dat;
1162
1163 sdio_claim_host(gInstance->func[func]);
1164 mmc_set_data_timeout(&mmc_dat, gInstance->func[func]->card);
1165 mmc_wait_for_req(gInstance->func[func]->card->host, &mmc_req);
1166 sdio_release_host(gInstance->func[func]);
1167
1168 err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
1169 if (0 != err_ret) {
1170 sd_err(("%s:CMD53 %s failed with code %d\n",
1171 __FUNCTION__,
1172 write ? "write" : "read",
1173 err_ret));
1174 }
1175 if (!fifo) {
1176 addr = addr + ttl_len - lft_len - dma_len;
1177 }
1178 }
1179
1180 /* PIO mode */
1181 if (0 != lft_len) {
1182 /* Claim host controller */
1183 sdio_claim_host(gInstance->func[func]);
1184 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
1185 uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) +
1186 xfred_len;
1187 int pad = 0;
1188 pkt_len = PKTLEN(sd->osh, pnext);
1189 if (0 != xfred_len) {
1190 pkt_len -= xfred_len;
1191 xfred_len = 0;
1192 }
1193 #ifdef BCMSDIOH_TXGLOM
1194 if (need_txglom) {
1195 if (!localbuf) {
1196 uint prev_lft_len = lft_len;
1197 lft_len = sdioh_request_packet_align(lft_len, write,
1198 func, blk_size);
1199
1200 if (lft_len > prev_lft_len) {
1201 sd_err(("%s: padding is unexpected! lft_len %d,"
1202 " prev_lft_len %d %s\n",
1203 __FUNCTION__, lft_len, prev_lft_len,
1204 write ? "Write" : "Read"));
1205 }
1206
1207 localbuf = (uint8 *)MALLOC(sd->osh, lft_len);
1208 if (localbuf == NULL) {
1209 sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
1210 __FUNCTION__, (write) ? "TX" : "RX"));
1211 need_txglom = FALSE;
1212 goto txglomfail;
1213 }
1214 }
1215 bcopy(buf, (localbuf + local_plen), pkt_len);
1216 local_plen += pkt_len;
1217
1218 if (PKTNEXT(sd->osh, pnext)) {
1219 continue;
1220 }
1221
1222 buf = localbuf;
1223 pkt_len = local_plen;
1224 }
1225
1226 txglomfail:
1227 #endif /* BCMSDIOH_TXGLOM */
1228
1229 if (
1230 #ifdef BCMSDIOH_TXGLOM
1231 !need_txglom &&
1232 #endif
1233 TRUE) {
1234 int align_pkt_len = 0;
1235 align_pkt_len = sdioh_request_packet_align(pkt_len, write,
1236 func, blk_size);
1237
1238 pad = align_pkt_len - pkt_len;
1239 if (pad > 0) {
1240 if (func == SDIO_FUNC_2) {
1241 sd_err(("%s: padding is unexpected! pkt_len %d,"
1242 " PKTLEN %d lft_len %d %s\n",
1243 __FUNCTION__, pkt_len, PKTLEN(sd->osh, pnext),
1244 lft_len, write ? "Write" : "Read"));
1245 }
1246 if (PKTTAILROOM(sd->osh, pkt) < pad) {
1247 sd_info(("%s: insufficient tailroom %d, pad %d,"
1248 " lft_len %d pktlen %d, func %d %s\n",
1249 __FUNCTION__, (int)PKTTAILROOM(sd->osh, pkt),
1250 pad, lft_len, PKTLEN(sd->osh, pnext), func,
1251 write ? "W" : "R"));
1252 if (PKTPADTAILROOM(sd->osh, pkt, pad)) {
1253 sd_err(("%s: padding error size %d.\n",
1254 __FUNCTION__, pad));
1255 return SDIOH_API_RC_FAIL;
1256 }
1257 }
1258 }
1259 }
1260
1261 if ((write) && (!fifo))
1262 err_ret = sdio_memcpy_toio(
1263 gInstance->func[func],
1264 addr, buf, pkt_len);
1265 else if (write)
1266 err_ret = sdio_memcpy_toio(
1267 gInstance->func[func],
1268 addr, buf, pkt_len);
1269 else if (fifo)
1270 err_ret = sdio_readsb(
1271 gInstance->func[func],
1272 buf, addr, pkt_len);
1273 else
1274 err_ret = sdio_memcpy_fromio(
1275 gInstance->func[func],
1276 buf, addr, pkt_len);
1277
1278 if (err_ret)
1279 sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
1280 __FUNCTION__,
1281 (write) ? "TX" : "RX",
1282 pnext, SGCount, addr, pkt_len, err_ret));
1283 else
1284 sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
1285 __FUNCTION__,
1286 (write) ? "TX" : "RX",
1287 pnext, SGCount, addr, pkt_len));
1288
1289 if (!fifo)
1290 addr += pkt_len;
1291 SGCount ++;
1292 }
1293 sdio_release_host(gInstance->func[func]);
1294 }
1295 #ifdef BCMSDIOH_TXGLOM
1296 if (localbuf)
1297 MFREE(sd->osh, localbuf, lft_len);
1298 #endif /* BCMSDIOH_TXGLOM */
1299
1300 sd_trace(("%s: Exit\n", __FUNCTION__));
1301 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1302 }
1303
1304
1305 /*
1306 * This function takes a buffer or packet, and fixes everything up so that in the
1307 * end, a DMA-able packet is created.
1308 *
1309 * A buffer does not have an associated packet pointer, and may or may not be aligned.
1310 * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
1311 * then all the packets in the chain must be properly aligned. If the packet data is not
1312 * aligned, then there may only be one packet, and in this case, it is copied to a new
1313 * aligned packet.
1314 *
1315 */
1316 extern SDIOH_API_RC
1317 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
1318 uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
1319 {
1320 SDIOH_API_RC Status;
1321 void *tmppkt;
1322 void *orig_buf = NULL;
1323 uint copylen = 0;
1324
1325 sd_trace(("%s: Enter\n", __FUNCTION__));
1326
1327 DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
1328 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1329
1330 if (pkt == NULL) {
1331 /* Case 1: we don't have a packet. */
1332 orig_buf = buffer;
1333 copylen = buflen_u;
1334 } else if ((ulong)PKTDATA(sd->osh, pkt) & DMA_ALIGN_MASK) {
1335 /* Case 2: We have a packet, but it is unaligned.
1336 * in this case, we cannot have a chain.
1337 */
1338 ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
1339
1340 orig_buf = PKTDATA(sd->osh, pkt);
1341 copylen = PKTLEN(sd->osh, pkt);
1342 }
1343
1344 tmppkt = pkt;
1345 if (copylen) {
1346 tmppkt = PKTGET_STATIC(sd->osh, copylen, write ? TRUE : FALSE);
1347 if (tmppkt == NULL) {
1348 sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, copylen));
1349 return SDIOH_API_RC_FAIL;
1350 }
1351 /* For a write, copy the buffer data into the packet. */
1352 if (write)
1353 bcopy(orig_buf, PKTDATA(sd->osh, tmppkt), copylen);
1354 }
1355
1356 Status = sdioh_request_packet(sd, fix_inc, write, func, addr, tmppkt);
1357
1358 if (copylen) {
1359 /* For a read, copy the packet data back to the buffer. */
1360 if (!write)
1361 bcopy(PKTDATA(sd->osh, tmppkt), orig_buf, PKTLEN(sd->osh, tmppkt));
1362 PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
1363 }
1364
1365 return (Status);
1366 }
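/*
 * Minimal usage sketch (illustrative only; the real callers live in the DHD bus
 * layer): writing from a plain, possibly unaligned buffer with no packet
 * attached. Because 'pkt' is NULL, the routine above copies the data into a
 * freshly allocated, aligned packet before issuing the transfer. The
 * SDIOH_DATA_PIO/SDIOH_DATA_INC flag names and 'addr' are assumed here for the
 * sake of the example.
 *
 *   uint8 buf[64];
 *   SDIOH_API_RC rc;
 *   rc = sdioh_request_buffer(sd, SDIOH_DATA_PIO, SDIOH_DATA_INC, SDIOH_WRITE,
 *                             SDIO_FUNC_2, addr, 4, sizeof(buf), buf, NULL);
 *   if (rc != SDIOH_API_RC_SUCCESS)
 *       ... handle error ...
 */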
1367
1368 /* This function performs an "abort" for both the host and the device */
1369 extern int
1370 sdioh_abort(sdioh_info_t *sd, uint func)
1371 {
1372 #if defined(MMC_SDIO_ABORT)
1373 char t_func = (char) func;
1374 #endif /* defined(MMC_SDIO_ABORT) */
1375 sd_trace(("%s: Enter\n", __FUNCTION__));
1376
1377 #if defined(MMC_SDIO_ABORT)
1378 /* issue abort cmd52 command through F1 */
1379 sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
1380 #endif /* defined(MMC_SDIO_ABORT) */
1381
1382 sd_trace(("%s: Exit\n", __FUNCTION__));
1383 return SDIOH_API_RC_SUCCESS;
1384 }
1385
1386 /* Reset and re-initialize the device */
1387 int sdioh_sdio_reset(sdioh_info_t *si)
1388 {
1389 sd_trace(("%s: Enter\n", __FUNCTION__));
1390 sd_trace(("%s: Exit\n", __FUNCTION__));
1391 return SDIOH_API_RC_SUCCESS;
1392 }
1393
1394 /* Disable device interrupt */
1395 void
1396 sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
1397 {
1398 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1399 sd->intmask &= ~CLIENT_INTR;
1400 }
1401
1402 /* Enable device interrupt */
1403 void
1404 sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
1405 {
1406 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1407 sd->intmask |= CLIENT_INTR;
1408 }
1409
1410 /* Read client card reg */
1411 int
1412 sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
1413 {
1414
1415 if ((func == 0) || (regsize == 1)) {
1416 uint8 temp = 0;
1417
1418 sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1419 *data = temp;
1420 *data &= 0xff;
1421 sd_data(("%s: byte read data=0x%02x\n",
1422 __FUNCTION__, *data));
1423 } else {
1424 sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
1425 if (regsize == 2)
1426 *data &= 0xffff;
1427
1428 sd_data(("%s: word read data=0x%08x\n",
1429 __FUNCTION__, *data));
1430 }
1431
1432 return SUCCESS;
1433 }
1434
1435 #if !defined(OOB_INTR_ONLY)
1436 /* bcmsdh_sdmmc interrupt handler */
1437 static void IRQHandler(struct sdio_func *func)
1438 {
1439 sdioh_info_t *sd;
1440
1441 sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
1442 sd = gInstance->sd;
1443
1444 ASSERT(sd != NULL);
1445 sdio_release_host(gInstance->func[0]);
1446
1447 if (sd->use_client_ints) {
1448 sd->intrcount++;
1449 ASSERT(sd->intr_handler);
1450 ASSERT(sd->intr_handler_arg);
1451 (sd->intr_handler)(sd->intr_handler_arg);
1452 } else {
1453 sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
1454
1455 sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
1456 __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
1457 }
1458
1459 sdio_claim_host(gInstance->func[0]);
1460 }
1461
1462 /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
1463 static void IRQHandlerF2(struct sdio_func *func)
1464 {
1465 sdioh_info_t *sd;
1466
1467 sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
1468
1469 sd = gInstance->sd;
1470
1471 ASSERT(sd != NULL);
1472 BCM_REFERENCE(sd);
1473 }
1474 #endif /* !defined(OOB_INTR_ONLY) */
1475
1476 #ifdef NOTUSED
1477 /* Write client card reg */
1478 static int
1479 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
1480 {
1481
1482 if ((func == 0) || (regsize == 1)) {
1483 uint8 temp;
1484
1485 temp = data & 0xff;
1486 sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
1487 sd_data(("%s: byte write data=0x%02x\n",
1488 __FUNCTION__, data));
1489 } else {
1490 if (regsize == 2)
1491 data &= 0xffff;
1492
1493 sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
1494
1495 sd_data(("%s: word write data=0x%08x\n",
1496 __FUNCTION__, data));
1497 }
1498
1499 return SUCCESS;
1500 }
1501 #endif /* NOTUSED */
1502
1503 int
1504 sdioh_start(sdioh_info_t *si, int stage)
1505 {
1506 int ret;
1507 sdioh_info_t *sd = gInstance->sd;
1508
1509 if (!sd) {
1510 sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
1511 return (0);
1512 }
1513
1514 /* This needs to be done in stages because the interrupt cannot be enabled
1515 until the firmware download is complete; otherwise polled
1516 SDIO accesses would get in the way
1517 */
1518 if (gInstance->func[0]) {
1519 if (stage == 0) {
1520 /* Since the power to the chip is killed, we will have to
1521 re-enumerate the device again. Set the block size
1522 and enable function 1 in preparation for
1523 downloading the code
1524 */
1525 /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
1526 2.6.27. The implementation prior to that is buggy, and needs broadcom's
1527 patch for it
1528 */
1529 if ((ret = sdio_reset_comm(gInstance->func[0]->card))) {
1530 sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
1531 return ret;
1532 }
1533 else {
1534 sd->num_funcs = 2;
1535 sd->sd_blockmode = TRUE;
1536 sd->use_client_ints = TRUE;
1537 sd->client_block_size[0] = 64;
1538
1539 if (gInstance->func[1]) {
1540 /* Claim host controller */
1541 sdio_claim_host(gInstance->func[1]);
1542
1543 sd->client_block_size[1] = 64;
1544 if (sdio_set_block_size(gInstance->func[1], 64)) {
1545 sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
1546 }
1547
1548 /* Release host controller F1 */
1549 sdio_release_host(gInstance->func[1]);
1550 }
1551
1552 if (gInstance->func[2]) {
1553 /* Claim host controller F2 */
1554 sdio_claim_host(gInstance->func[2]);
1555
1556 sd->client_block_size[2] = sd_f2_blocksize;
1557 if (sdio_set_block_size(gInstance->func[2],
1558 sd_f2_blocksize)) {
1559 sd_err(("bcmsdh_sdmmc: Failed to set F2 "
1560 "blocksize to %d\n", sd_f2_blocksize));
1561 }
1562
1563 /* Release host controller F2 */
1564 sdio_release_host(gInstance->func[2]);
1565 }
1566
1567 sdioh_sdmmc_card_enablefuncs(sd);
1568 }
1569 } else {
1570 #if !defined(OOB_INTR_ONLY)
1571 sdio_claim_host(gInstance->func[0]);
1572 if (gInstance->func[2])
1573 sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
1574 if (gInstance->func[1])
1575 sdio_claim_irq(gInstance->func[1], IRQHandler);
1576 sdio_release_host(gInstance->func[0]);
1577 #else /* defined(OOB_INTR_ONLY) */
1578 #if defined(HW_OOB)
1579 sdioh_enable_func_intr();
1580 #endif
1581 bcmsdh_oob_intr_set(TRUE);
1582 #endif /* !defined(OOB_INTR_ONLY) */
1583 }
1584 }
1585 else
1586 sd_err(("%s Failed\n", __FUNCTION__));
1587
1588 return (0);
1589 }
1590
1591 int
1592 sdioh_stop(sdioh_info_t *si)
1593 {
1594 /* The MSM7201A Android SDIO stack has a bug with interrupts:
1595 internally the SDIO stack falls back to polling,
1596 which causes issues when the device is turned off. So
1597 unregister the interrupt with the SDIO stack to stop the
1598 polling
1599 */
1600 if (gInstance->func[0]) {
1601 #if !defined(OOB_INTR_ONLY)
1602 sdio_claim_host(gInstance->func[0]);
1603 if (gInstance->func[1])
1604 sdio_release_irq(gInstance->func[1]);
1605 if (gInstance->func[2])
1606 sdio_release_irq(gInstance->func[2]);
1607 sdio_release_host(gInstance->func[0]);
1608 #else /* defined(OOB_INTR_ONLY) */
1609 #if defined(HW_OOB)
1610 sdioh_disable_func_intr();
1611 #endif
1612 bcmsdh_oob_intr_set(FALSE);
1613 #endif /* !defined(OOB_INTR_ONLY) */
1614 }
1615 else
1616 sd_err(("%s Failed\n", __FUNCTION__));
1617 return (0);
1618 }
1619
1620 int
1621 sdioh_waitlockfree(sdioh_info_t *sd)
1622 {
1623 return (1);
1624 }
1625
1626
1627 SDIOH_API_RC
1628 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1629 {
1630 return SDIOH_API_RC_FAIL;
1631 }
1632
1633 SDIOH_API_RC
1634 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1635 {
1636 return SDIOH_API_RC_FAIL;
1637 }
1638
1639 bool
1640 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1641 {
1642 return FALSE;
1643 }
1644
1645 SDIOH_API_RC
1646 sdioh_gpio_init(sdioh_info_t *sd)
1647 {
1648 return SDIOH_API_RC_FAIL;
1649 }
1650