1 /*
2 * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
3 *
4 * Copyright (C) 1999-2017, Broadcom Corporation
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Proprietary,Open:>>
26 *
27 * $Id: bcmsdh_sdmmc.c 710913 2017-07-14 10:17:51Z $
28 */
29 #include <typedefs.h>
30
31 #include <bcmdevs.h>
32 #include <bcmendian.h>
33 #include <bcmutils.h>
34 #include <osl.h>
35 #include <sdio.h> /* SDIO Device and Protocol Specs */
36 #include <sdioh.h> /* Standard SDIO Host Controller Specification */
37 #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
38 #include <sdiovar.h> /* ioctl/iovars */
39
40 #include <linux/mmc/core.h>
41 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0))
42 #include <drivers/mmc/core/host.h>
void
mmc_host_clk_hold(struct mmc_host *host)
{
	/* Compatibility stub for kernels <= 3.0.0, which lack
	 * mmc_host_clk_hold(); the driver calls it unconditionally, so
	 * provide a no-op so the build links.
	 */
	BCM_REFERENCE(host);
	return;
}
49
void
mmc_host_clk_release(struct mmc_host *host)
{
	/* Compatibility stub for kernels <= 3.0.0 (see mmc_host_clk_hold()
	 * above); intentionally does nothing.
	 */
	BCM_REFERENCE(host);
	return;
}
56 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
57 #include <drivers/mmc/core/host.h>
58 #else
59 #include <linux/mmc/host.h>
60 #include <drivers/mmc/core/host.h>
61 #include <drivers/mmc/core/core.h>
62 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) */
63 #include <linux/mmc/card.h>
64 #include <linux/mmc/sdio_func.h>
65 #include <linux/mmc/sdio_ids.h>
66
67 #include <dngl_stats.h>
68 #include <dhd.h>
69 #include <dhd_dbg.h>
70
71 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
72 #include <linux/suspend.h>
73 extern volatile bool dhd_mmc_suspend;
74 #endif
75 #include "bcmsdh_sdmmc.h"
76
77 #ifndef BCMSDH_MODULE
78 extern int sdio_function_init(void);
79 extern void sdio_function_cleanup(void);
80 #endif /* BCMSDH_MODULE */
81
82 #if !defined(OOB_INTR_ONLY)
83 static void IRQHandler(struct sdio_func *func);
84 static void IRQHandlerF2(struct sdio_func *func);
85 #endif /* !defined(OOB_INTR_ONLY) */
86 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
87 #if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE) && LINUX_VERSION_CODE <= KERNEL_VERSION(4, 9, 0)
88 extern int sdio_reset_comm(struct mmc_card *card);
89 #else
int sdio_reset_comm(struct mmc_card *card)
{
	/* Local replacement for the kernel's sdio_reset_comm(), used when it
	 * is not exported (kernels > 4.9, or builds without
	 * ENABLE_INSMOD_NO_FW_LOAD / with BUS_POWER_RESTORE).
	 * Returns 0 on success or a negative mmc-core error code.
	 */
	int ret;
	struct mmc_host *host = card->host;

	__mmc_claim_host(host, NULL, NULL);
	/* Quiesce the retune machinery before resetting: a retune issued
	 * against a half-reset card would fail. */
	mmc_retune_unpause(host);
	host->can_retune = 0;
	mmc_retune_timer_stop(host);
	host->retune_now = 0;
	host->need_retune = 0;
	/* Soft-reset and re-initialize the card through the mmc core */
	ret = mmc_sw_reset(host);
	mmc_release_host(host);
	return ret;
}
104 #endif
105 #ifdef GLOBAL_SDMMC_INSTANCE
106 extern PBCMSDH_SDMMC_INSTANCE gInstance;
107 #endif
108 #ifdef CUSTOMER_HW_ALLWINNER
109 extern int sunxi_mmc_check_r1_ready(struct mmc_host* mmc, unsigned ms);
110 #endif
111
112 #define DEFAULT_SDIO_F2_BLKSIZE 512
113 #ifndef CUSTOM_SDIO_F2_BLKSIZE
114 #define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
115 #endif
116
117 #define DEFAULT_SDIO_F1_BLKSIZE 64
118 #ifndef CUSTOM_SDIO_F1_BLKSIZE
119 #define CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE
120 #endif
121
122 #define MAX_IO_RW_EXTENDED_BLK 511
123
124 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
125 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
126 uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
127
128 #if defined(BT_OVER_SDIO)
129 uint sd_f3_blocksize = 64;
130 #endif /* defined (BT_OVER_SDIO) */
131
132 uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
133
134 uint sd_power = 1; /* Default to SD Slot powered ON */
135 uint sd_clock = 1; /* Default to SD Clock turned ON */
136 uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
137 uint sd_msglevel = 0x01;
138 uint sd_use_dma = TRUE;
139
140 #ifndef CUSTOM_RXCHAIN
141 #define CUSTOM_RXCHAIN 0
142 #endif
143
144 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
145 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
146 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
147 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
148
149 #define DMA_ALIGN_MASK 0x03
150 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
151
152 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
153
154 void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
155 uint sdmmc_get_clock_rate(sdioh_info_t *sd);
156 void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
157 #if defined(BT_OVER_SDIO)
extern
void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
{
	/* Register the Bluetooth function (F3) sdio_func with this host
	 * instance so the shared-SDIO BT path can use it
	 * (BT_OVER_SDIO builds only).
	 */
	sd->func[3] = func;
	sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
}
164 #endif /* defined (BT_OVER_SDIO) */
165
166 static int
sdioh_sdmmc_card_enablefuncs(sdioh_info_t * sd)167 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
168 {
169 int err_ret;
170 uint32 fbraddr;
171 uint8 func;
172
173 sd_trace(("%s\n", __FUNCTION__));
174
175 /* Get the Card's common CIS address */
176 sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
177 sd->func_cis_ptr[0] = sd->com_cis_ptr;
178 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
179
180 /* Get the Card's function CIS (for each function) */
181 for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
182 func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
183 sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
184 sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
185 __FUNCTION__, func, sd->func_cis_ptr[func]));
186 }
187
188 sd->func_cis_ptr[0] = sd->com_cis_ptr;
189 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
190
191 /* Enable Function 1 */
192 sdio_claim_host(sd->func[1]);
193 err_ret = sdio_enable_func(sd->func[1]);
194 sdio_release_host(sd->func[1]);
195 if (err_ret) {
196 sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret));
197 }
198
199 return FALSE;
200 }
201
202 /*
203 * Public entry points & extern's
204 */
/*
 * Allocate and initialize the sdioh layer state for a probed SDIO card.
 * `func` is the sdio_func the mmc stack probed; F0 is faked locally since
 * Linux does not expose function 0 as a real sdio_func.  Sets the F1/F2
 * block sizes and reads the current host clock rate.
 * Returns the new handle, or NULL on failure (allocation is freed).
 */
extern sdioh_info_t *
sdioh_attach(osl_t *osh, struct sdio_func *func)
{
	sdioh_info_t *sd = NULL;
	int err_ret;

	sd_trace(("%s\n", __FUNCTION__));

	if (func == NULL) {
		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
		return NULL;
	}

	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
		return NULL;
	}
	bzero((char *)sd, sizeof(sdioh_info_t));
	sd->osh = osh;
	/* fake_func0 stands in for SDIO function 0 (CCCR access) */
	sd->fake_func0.num = 0;
	sd->fake_func0.card = func->card;
	sd->func[0] = &sd->fake_func0;
#ifdef GLOBAL_SDMMC_INSTANCE
	if (func->num == 2)
		sd->func[1] = gInstance->func[1];
#else
	/* card->sdio_func[] is 0-based: entry 0 is F1, entry 1 is F2 */
	sd->func[1] = func->card->sdio_func[0];
#endif
	sd->func[2] = func->card->sdio_func[1];
#ifdef GLOBAL_SDMMC_INSTANCE
	sd->func[func->num] = func;
#endif

#if defined(BT_OVER_SDIO)
	sd->func[3] = NULL;
#endif /* defined (BT_OVER_SDIO) */

	sd->num_funcs = 2;
	sd->sd_blockmode = TRUE;
	sd->use_client_ints = TRUE;
	sd->client_block_size[0] = 64;
	sd->use_rxchain = CUSTOM_RXCHAIN;
	if (sd->func[1] == NULL || sd->func[2] == NULL) {
		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
		goto fail;
	}
	sdio_set_drvdata(sd->func[1], sd);

	/* Program the F1 block size into both the card and our cache */
	sdio_claim_host(sd->func[1]);
	sd->client_block_size[1] = sd_f1_blocksize;
	err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
	sdio_release_host(sd->func[1]);
	if (err_ret) {
		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
		goto fail;
	}

	/* Likewise for F2 (the data function) */
	sdio_claim_host(sd->func[2]);
	sd->client_block_size[2] = sd_f2_blocksize;
	printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
	sdio_release_host(sd->func[2]);
	if (err_ret) {
		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
			sd_f2_blocksize, err_ret));
		goto fail;
	}

	sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
	printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate);
	/* Return value intentionally ignored: F1-enable failure is logged
	 * inside and is not fatal here. */
	sdioh_sdmmc_card_enablefuncs(sd);

	sd_trace(("%s: Done\n", __FUNCTION__));
	return sd;

fail:
	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
	return NULL;
}
284
285
286 extern SDIOH_API_RC
sdioh_detach(osl_t * osh,sdioh_info_t * sd)287 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
288 {
289 sd_trace(("%s\n", __FUNCTION__));
290
291 if (sd) {
292 /* Disable Function 2 */
293 if (sd->func[2]) {
294 sdio_claim_host(sd->func[2]);
295 sdio_disable_func(sd->func[2]);
296 sdio_release_host(sd->func[2]);
297 }
298
299 /* Disable Function 1 */
300 if (sd->func[1]) {
301 sdio_claim_host(sd->func[1]);
302 sdio_disable_func(sd->func[1]);
303 sdio_release_host(sd->func[1]);
304 }
305
306 sd->func[1] = NULL;
307 sd->func[2] = NULL;
308
309 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
310 }
311 return SDIOH_API_RC_SUCCESS;
312 }
313
314 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
315
316 extern SDIOH_API_RC
sdioh_enable_func_intr(sdioh_info_t * sd)317 sdioh_enable_func_intr(sdioh_info_t *sd)
318 {
319 uint8 reg;
320 int err;
321
322 if (sd->func[0] == NULL) {
323 sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
324 return SDIOH_API_RC_FAIL;
325 }
326
327 sdio_claim_host(sd->func[0]);
328 reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
329 if (err) {
330 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
331 sdio_release_host(sd->func[0]);
332 return SDIOH_API_RC_FAIL;
333 }
334 /* Enable F1 and F2 interrupts, clear master enable */
335 reg &= ~INTR_CTL_MASTER_EN;
336 reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
337 #if defined(BT_OVER_SDIO)
338 reg |= (INTR_CTL_FUNC3_EN);
339 #endif /* defined (BT_OVER_SDIO) */
340 sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
341 sdio_release_host(sd->func[0]);
342
343 if (err) {
344 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
345 return SDIOH_API_RC_FAIL;
346 }
347
348 return SDIOH_API_RC_SUCCESS;
349 }
350
351 extern SDIOH_API_RC
sdioh_disable_func_intr(sdioh_info_t * sd)352 sdioh_disable_func_intr(sdioh_info_t *sd)
353 {
354 uint8 reg;
355 int err;
356
357 if (sd->func[0] == NULL) {
358 sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
359 return SDIOH_API_RC_FAIL;
360 }
361
362 sdio_claim_host(sd->func[0]);
363 reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
364 if (err) {
365 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
366 sdio_release_host(sd->func[0]);
367 return SDIOH_API_RC_FAIL;
368 }
369 reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
370 #if defined(BT_OVER_SDIO)
371 reg &= ~INTR_CTL_FUNC3_EN;
372 #endif
373 /* Disable master interrupt with the last function interrupt */
374 if (!(reg & 0xFE))
375 reg = 0;
376 sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
377 sdio_release_host(sd->func[0]);
378
379 if (err) {
380 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
381 return SDIOH_API_RC_FAIL;
382 }
383
384 return SDIOH_API_RC_SUCCESS;
385 }
386 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
387
/* Configure callback to client when we receive client interrupt */
extern SDIOH_API_RC
sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));
	if (fn == NULL) {
		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
		return SDIOH_API_RC_FAIL;
	}
#if !defined(OOB_INTR_ONLY)
	/* In-band SDIO interrupts: record the client callback, then claim
	 * the F2 and F1 IRQs so IRQHandlerF2/IRQHandler dispatch into it. */
	sd->intr_handler = fn;
	sd->intr_handler_arg = argh;
	sd->intr_handler_valid = TRUE;

	/* register and unmask irq */
	if (sd->func[2]) {
		sdio_claim_host(sd->func[2]);
		sdio_claim_irq(sd->func[2], IRQHandlerF2);
		sdio_release_host(sd->func[2]);
	}

	if (sd->func[1]) {
		sdio_claim_host(sd->func[1]);
		sdio_claim_irq(sd->func[1], IRQHandler);
		sdio_release_host(sd->func[1]);
	}
#elif defined(HW_OOB)
	/* Out-of-band interrupt pin: only enable function interrupts in CCCR */
	sdioh_enable_func_intr(sd);
#endif /* !defined(OOB_INTR_ONLY) */

	return SDIOH_API_RC_SUCCESS;
}
420
/* Undo sdioh_interrupt_register(): release the claimed IRQs (in-band
 * builds) or disable the CCCR function interrupts (HW_OOB builds), and
 * clear the stored client callback.
 */
extern SDIOH_API_RC
sdioh_interrupt_deregister(sdioh_info_t *sd)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));

#if !defined(OOB_INTR_ONLY)
	if (sd->func[1]) {
		/* register and unmask irq */
		sdio_claim_host(sd->func[1]);
		sdio_release_irq(sd->func[1]);
		sdio_release_host(sd->func[1]);
	}

	if (sd->func[2]) {
		/* Claim host controller F2 */
		sdio_claim_host(sd->func[2]);
		sdio_release_irq(sd->func[2]);
		/* Release host controller F2 */
		sdio_release_host(sd->func[2]);
	}

	sd->intr_handler_valid = FALSE;
	sd->intr_handler = NULL;
	sd->intr_handler_arg = NULL;
#elif defined(HW_OOB)
	/* Only touch CCCR when the driver loaded the firmware itself;
	 * otherwise leave the card's interrupt config alone. */
	if (dhd_download_fw_on_driverload)
		sdioh_disable_func_intr(sd);
#endif /* !defined(OOB_INTR_ONLY) */
	return SDIOH_API_RC_SUCCESS;
}
451
/* Report via *onoff whether client interrupts are currently enabled. */
extern SDIOH_API_RC
sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));
	*onoff = sd->client_intr_enabled;
	return SDIOH_API_RC_SUCCESS;
}
459
460 #if defined(DHD_DEBUG)
/* Debug hook: pending-interrupt query is not implemented for the sdmmc
 * backend; always reports "none pending". */
extern bool
sdioh_interrupt_pending(sdioh_info_t *sd)
{
	return (0);
}
466 #endif
467
/* Return the number of I/O functions this host instance manages
 * (fixed at 2 in sdioh_attach()). */
uint
sdioh_query_iofnum(sdioh_info_t *sd)
{
	return sd->num_funcs;
}
473
474 /* IOVar table */
/* IOVar identifiers; each consumed ID maps to one entry in sdioh_iovars[]
 * below and one case in sdioh_iovar_op().  IOV_HOSTREG, IOV_DEVREG and
 * IOV_HCIREGS have no table entry here and are therefore unreachable. */
enum {
	IOV_MSGLEVEL = 1,
	IOV_BLOCKMODE,
	IOV_BLOCKSIZE,
	IOV_DMA,
	IOV_USEINTS,
	IOV_NUMINTS,
	IOV_NUMLOCALINTS,
	IOV_HOSTREG,
	IOV_DEVREG,
	IOV_DIVISOR,
	IOV_SDMODE,
	IOV_HISPEED,
	IOV_HCIREGS,
	IOV_POWER,
	IOV_CLOCK,
	IOV_RXCHAIN
};
493
/* Name/ID/type table consumed by sdioh_iovar_op() via bcm_iovar_lookup(). */
const bcm_iovar_t sdioh_iovars[] = {
	{"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
	{"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
	{"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 },
	{"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
	{"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
	{"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
	{"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
	{"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
	{"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
	{"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0 },
	{"sd_rxchain", IOV_RXCHAIN, 0, 0, IOVT_BOOL, 0 },
	{NULL, 0, 0, 0, 0, 0 }
};
510
/*
 * Get or set a driver tunable by name ("sd_blocksize", "sd_divisor", ...).
 * `set` selects direction: gets copy the value into `arg`, sets read it
 * from `params` (or `arg` when params is NULL).  Returns 0 on success or
 * a BCME_xxx error code.
 */
int
sdioh_iovar_op(sdioh_info_t *si, const char *name,
	void *params, int plen, void *arg, int len, bool set)
{
	const bcm_iovar_t *vi = NULL;
	int bcmerror = 0;
	int val_size;
	int32 int_val = 0;
	bool bool_val;
	uint32 actionid;

	ASSERT(name);
	ASSERT(len >= 0);

	/* Get must have return space; Set does not take qualifiers */
	ASSERT(set || (arg && len));
	ASSERT(!set || (!params && !plen));

	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));

	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
		bcmerror = BCME_UNSUPPORTED;
		goto exit;
	}

	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
		goto exit;

	/* Set up params so get and set can share the convenience variables */
	if (params == NULL) {
		params = arg;
		plen = len;
	}

	if (vi->type == IOVT_VOID)
		val_size = 0;
	else if (vi->type == IOVT_BUFFER)
		val_size = len;
	else
		val_size = sizeof(int);

	/* Pull the first 32 bits of the payload into int_val for scalar iovars */
	if (plen >= (int)sizeof(int_val))
		bcopy(params, &int_val, sizeof(int_val));

	bool_val = (int_val != 0) ? TRUE : FALSE;
	BCM_REFERENCE(bool_val);

	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
	switch (actionid) {
	case IOV_GVAL(IOV_MSGLEVEL):
		int_val = (int32)sd_msglevel;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_MSGLEVEL):
		sd_msglevel = int_val;
		break;

	case IOV_GVAL(IOV_BLOCKMODE):
		int_val = (int32)si->sd_blockmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKMODE):
		si->sd_blockmode = (bool)int_val;
		/* Haven't figured out how to make non-block mode with DMA */
		break;

	case IOV_GVAL(IOV_BLOCKSIZE):
		/* int_val is the function number; cast makes negatives fail too */
		if ((uint32)int_val > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}
		int_val = (int32)si->client_block_size[int_val];
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKSIZE):
	{
		/* int_val encodes (func << 16) | blksize; blksize 0 = use max */
		uint func = ((uint32)int_val >> 16);
		uint blksize = (uint16)int_val;
		uint maxsize;

		if (func > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}

		switch (func) {
		case 0: maxsize = 32; break;
		case 1: maxsize = BLOCK_SIZE_4318; break;
		case 2: maxsize = BLOCK_SIZE_4328; break;
		default: maxsize = 0;
		}
		if (blksize > maxsize) {
			bcmerror = BCME_BADARG;
			break;
		}
		if (!blksize) {
			blksize = maxsize;
		}

		/* Now set it */
		si->client_block_size[func] = blksize;

		if (si->func[func] == NULL) {
			sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
			bcmerror = BCME_NORESOURCE;
			break;
		}
		sdio_claim_host(si->func[func]);
		bcmerror = sdio_set_block_size(si->func[func], blksize);
		if (bcmerror)
			sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
				__FUNCTION__, func, blksize, bcmerror));
		sdio_release_host(si->func[func]);
		break;
	}

	case IOV_GVAL(IOV_RXCHAIN):
		int_val = (int32)si->use_rxchain;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_DMA):
		int_val = (int32)si->sd_use_dma;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DMA):
		si->sd_use_dma = (bool)int_val;
		break;

	case IOV_GVAL(IOV_USEINTS):
		int_val = (int32)si->use_client_ints;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_USEINTS):
		si->use_client_ints = (bool)int_val;
		if (si->use_client_ints)
			si->intmask |= CLIENT_INTR;
		else
			si->intmask &= ~CLIENT_INTR;

		break;

	case IOV_GVAL(IOV_DIVISOR):
		int_val = (uint32)sd_divisor;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DIVISOR):
		/* set the clock to divisor, if value is non-zero & power of 2 */
		if (int_val && !(int_val & (int_val - 1))) {
			sd_divisor = int_val;
			sdmmc_set_clock_divisor(si, sd_divisor);
		} else {
			DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
				__FUNCTION__));
		}
		break;

	case IOV_GVAL(IOV_POWER):
		int_val = (uint32)sd_power;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_POWER):
		sd_power = int_val;
		break;

	case IOV_GVAL(IOV_CLOCK):
		int_val = (uint32)sd_clock;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CLOCK):
		sd_clock = int_val;
		break;

	case IOV_GVAL(IOV_SDMODE):
		int_val = (uint32)sd_sdmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_SDMODE):
		sd_sdmode = int_val;
		break;

	case IOV_GVAL(IOV_HISPEED):
		int_val = (uint32)sd_hiok;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HISPEED):
		sd_hiok = int_val;
		break;

	case IOV_GVAL(IOV_NUMINTS):
		int_val = (int32)si->intrcount;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_NUMLOCALINTS):
		/* Local (host-controller) interrupt count is not tracked here */
		int_val = (int32)0;
		bcopy(&int_val, arg, val_size);
		break;
	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}
exit:

	return bcmerror;
}
727
728 #if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
729
/*
 * Program the Broadcom-specific F0 SEPINT register to drive the
 * out-of-band interrupt pin.  enable => interrupt mask + output-enable
 * (plus active-high polarity unless HW_OOB_LOW_LEVEL); disable => leave
 * only the active-high polarity bit set.
 * NOTE: the if/else arms are single statements selected by the
 * preprocessor — do not add braces around the #ifdef'd assignments.
 */
SDIOH_API_RC
sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
{
	SDIOH_API_RC status;
	uint8 data;

	if (enable)
#ifdef HW_OOB_LOW_LEVEL
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
#else
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
#endif
	else
		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */

	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
	return status;
}
748 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
749
750 extern SDIOH_API_RC
sdioh_cfg_read(sdioh_info_t * sd,uint fnc_num,uint32 addr,uint8 * data)751 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
752 {
753 SDIOH_API_RC status;
754 /* No lock needed since sdioh_request_byte does locking */
755 status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
756 return status;
757 }
758
759 extern SDIOH_API_RC
sdioh_cfg_write(sdioh_info_t * sd,uint fnc_num,uint32 addr,uint8 * data)760 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
761 {
762 /* No lock needed since sdioh_request_byte does locking */
763 SDIOH_API_RC status;
764 status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
765 return status;
766 }
767
768 static int
sdioh_sdmmc_get_cisaddr(sdioh_info_t * sd,uint32 regaddr)769 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
770 {
771 /* read 24 bits and return valid 17 bit addr */
772 int i;
773 uint32 scratch, regdata;
774 uint8 *ptr = (uint8 *)&scratch;
775 for (i = 0; i < 3; i++) {
776 if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS)
777 sd_err(("%s: Can't read!\n", __FUNCTION__));
778
779 *ptr++ = (uint8) regdata;
780 regaddr++;
781 }
782
783 /* Only the lower 17-bits are valid */
784 scratch = ltoh32(scratch);
785 scratch &= 0x0001FFFF;
786 return (scratch);
787 }
788
789 extern SDIOH_API_RC
sdioh_cis_read(sdioh_info_t * sd,uint func,uint8 * cisd,uint32 length)790 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
791 {
792 uint32 count;
793 int offset;
794 uint32 foo;
795 uint8 *cis = cisd;
796
797 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
798
799 if (!sd->func_cis_ptr[func]) {
800 bzero(cis, length);
801 sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
802 return SDIOH_API_RC_FAIL;
803 }
804
805 sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
806
807 for (count = 0; count < length; count++) {
808 offset = sd->func_cis_ptr[func] + count;
809 if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
810 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
811 return SDIOH_API_RC_FAIL;
812 }
813
814 *cis = (uint8)(foo & 0xff);
815 cis++;
816 }
817
818 return SDIOH_API_RC_SUCCESS;
819 }
820
821 extern SDIOH_API_RC
sdioh_cisaddr_read(sdioh_info_t * sd,uint func,uint8 * cisd,uint32 offset)822 sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset)
823 {
824 uint32 foo;
825
826 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
827
828 if (!sd->func_cis_ptr[func]) {
829 sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
830 return SDIOH_API_RC_FAIL;
831 }
832
833 sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
834
835 if (sdioh_sdmmc_card_regread (sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) {
836 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
837 return SDIOH_API_RC_FAIL;
838 }
839
840 *cisd = (uint8)(foo & 0xff);
841
842 return SDIOH_API_RC_SUCCESS;
843 }
844
845 extern SDIOH_API_RC
sdioh_request_byte(sdioh_info_t * sd,uint rw,uint func,uint regaddr,uint8 * byte)846 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
847 {
848 int err_ret = 0;
849 #if defined(MMC_SDIO_ABORT)
850 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
851 #endif
852 struct timespec64 now, before;
853
854 if (sd_msglevel & SDH_COST_VAL)
855 before = ktime_to_timespec64(ktime_get_boottime());
856
857 sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
858
859 DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
860 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
861 if(rw) { /* CMD52 Write */
862 if (func == 0) {
863 /* Can only directly write to some F0 registers. Handle F2 enable
864 * as a special case.
865 */
866 if (regaddr == SDIOD_CCCR_IOEN) {
867 #if defined(BT_OVER_SDIO)
868 do {
869 if (sd->func[3]) {
870 sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
871
872 if (*byte & SDIO_FUNC_ENABLE_3) {
873 sdio_claim_host(sd->func[3]);
874
875 /* Set Function 3 Block Size */
876 err_ret = sdio_set_block_size(sd->func[3],
877 sd_f3_blocksize);
878 if (err_ret) {
879 sd_err(("F3 blocksize set err%d\n",
880 err_ret));
881 }
882
883 /* Enable Function 3 */
884 sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
885 sd->func[3]));
886 err_ret = sdio_enable_func(sd->func[3]);
887 if (err_ret) {
888 sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
889 err_ret));
890 }
891
892 sdio_release_host(sd->func[3]);
893
894 break;
895 } else if (*byte & SDIO_FUNC_DISABLE_3) {
896 sdio_claim_host(sd->func[3]);
897
898 /* Disable Function 3 */
899 sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
900 sd->func[3]));
901 err_ret = sdio_disable_func(sd->func[3]);
902 if (err_ret) {
903 sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
904 err_ret));
905 }
906 sdio_release_host(sd->func[3]);
907 sd->func[3] = NULL;
908
909 break;
910 }
911 }
912 #endif /* defined (BT_OVER_SDIO) */
913 if (sd->func[2]) {
914 sdio_claim_host(sd->func[2]);
915 if (*byte & SDIO_FUNC_ENABLE_2) {
916 /* Enable Function 2 */
917 err_ret = sdio_enable_func(sd->func[2]);
918 if (err_ret) {
919 sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
920 err_ret));
921 }
922 } else {
923 /* Disable Function 2 */
924 err_ret = sdio_disable_func(sd->func[2]);
925 if (err_ret) {
926 sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
927 err_ret));
928 }
929 }
930 sdio_release_host(sd->func[2]);
931 }
932 #if defined(BT_OVER_SDIO)
933 } while (0);
934 #endif /* defined (BT_OVER_SDIO) */
935 }
936 #if defined(MMC_SDIO_ABORT)
937 /* to allow abort command through F1 */
938 else if (regaddr == SDIOD_CCCR_IOABORT) {
939 while (sdio_abort_retry--) {
940 if (sd->func[func]) {
941 sdio_claim_host(sd->func[func]);
942 /*
943 * this sdio_f0_writeb() can be replaced with
944 * another api depending upon MMC driver change.
945 * As of this time, this is temporaray one
946 */
947 sdio_writeb(sd->func[func],
948 *byte, regaddr, &err_ret);
949 sdio_release_host(sd->func[func]);
950 }
951 if (!err_ret)
952 break;
953 }
954 }
955 #endif /* MMC_SDIO_ABORT */
956 else if (regaddr < 0xF0) {
957 sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
958 } else {
959 /* Claim host controller, perform F0 write, and release */
960 if (sd->func[func]) {
961 sdio_claim_host(sd->func[func]);
962 sdio_f0_writeb(sd->func[func],
963 *byte, regaddr, &err_ret);
964 sdio_release_host(sd->func[func]);
965 }
966 }
967 } else {
968 /* Claim host controller, perform Fn write, and release */
969 if (sd->func[func]) {
970 sdio_claim_host(sd->func[func]);
971 sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
972 sdio_release_host(sd->func[func]);
973 }
974 }
975 } else { /* CMD52 Read */
976 /* Claim host controller, perform Fn read, and release */
977 if (sd->func[func]) {
978 sdio_claim_host(sd->func[func]);
979 if (func == 0) {
980 *byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
981 } else {
982 *byte = sdio_readb(sd->func[func], regaddr, &err_ret);
983 }
984 sdio_release_host(sd->func[func]);
985 }
986 }
987
988 #ifdef CUSTOMER_HW_ALLWINNER
989 //AW judge sdio read write timeout, 1s
990 if (sunxi_mmc_check_r1_ready(sd->func[func]->card->host, 1000) != 0)
991 printk("%s data timeout.\n", __FUNCTION__);
992 #endif
993
994 if (err_ret) {
995 if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
996 } else {
997 sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
998 rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
999 }
1000 }
1001
1002 if (sd_msglevel & SDH_COST_VAL) {
1003 now = ktime_to_timespec64(ktime_get_boottime());
1004 sd_cost(("%s: rw=%d len=1 cost=%llus %ldus\n", __FUNCTION__,
1005 rw, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
1006 }
1007
1008 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1009 }
1010
1011 uint
sdioh_set_mode(sdioh_info_t * sd,uint mode)1012 sdioh_set_mode(sdioh_info_t *sd, uint mode)
1013 {
1014 if (mode == SDPCM_TXGLOM_CPY)
1015 sd->txglom_mode = mode;
1016 else if (mode == SDPCM_TXGLOM_MDESC)
1017 sd->txglom_mode = mode;
1018
1019 return (sd->txglom_mode);
1020 }
1021
/*
 * 2- or 4-byte register access on function `func` (F0 is rejected).
 * rw != 0 writes *word, rw == 0 reads into *word.  On failure, retries an
 * I/O abort of the function through F0 SDIOD_CCCR_IOABORT before logging.
 * Returns SDIOH_API_RC_SUCCESS only when both the transfer and any abort
 * write succeeded.
 */
extern SDIOH_API_RC
sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
	uint32 *word, uint nbytes)
{
	int err_ret = SDIOH_API_RC_FAIL;
	int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc in dead lock
#if defined(MMC_SDIO_ABORT)
	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
#endif
	struct timespec64 now, before;

	if (sd_msglevel & SDH_COST_VAL)
		before = ktime_to_timespec64(ktime_get_boottime());

	if (func == 0) {
		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
		return SDIOH_API_RC_FAIL;
	}

	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		__FUNCTION__, cmd_type, rw, func, addr, nbytes));

	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
	/* Claim host controller */
	sdio_claim_host(sd->func[func]);

	if(rw) { /* CMD52 Write */
		if (nbytes == 4) {
			sdio_writel(sd->func[func], *word, addr, &err_ret);
		} else if (nbytes == 2) {
			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
		} else {
			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
		}
	} else { /* CMD52 Read */
		if (nbytes == 4) {
			*word = sdio_readl(sd->func[func], addr, &err_ret);
		} else if (nbytes == 2) {
			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
		} else {
			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
		}
	}

#ifdef CUSTOMER_HW_ALLWINNER
	//AW judge sdio read write timeout, 1s
	if (sunxi_mmc_check_r1_ready(sd->func[func]->card->host, 1000) != 0)
		printk("%s data timeout.\n", __FUNCTION__);
#endif

	/* Release host controller */
	sdio_release_host(sd->func[func]);

	if (err_ret) {
#if defined(MMC_SDIO_ABORT)
		/* Any error on CMD53 transaction should abort that function using function 0. */
		while (sdio_abort_retry--) {
			if (sd->func[0]) {
				sdio_claim_host(sd->func[0]);
				/*
				 * this sdio_f0_writeb() can be replaced with another api
				 * depending upon MMC driver change.
				 * As of this time, this is temporaray one
				 */
				sdio_writeb(sd->func[0],
					func, SDIOD_CCCR_IOABORT, &err_ret2);

#ifdef CUSTOMER_HW_ALLWINNER
				//AW judge sdio read write timeout, 1s
				if (sunxi_mmc_check_r1_ready(sd->func[func]->card->host, 1000) != 0)
					printk("%s data timeout, SDIO_CCCR_IOABORT.\n", __FUNCTION__);
#endif
				sdio_release_host(sd->func[0]);
			}
			if (!err_ret2)
				break;
		}
		if (err_ret)
#endif /* MMC_SDIO_ABORT */
		{
			sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
				rw ? "Write" : "Read", func, addr, *word, err_ret));
		}
	}

	if (sd_msglevel & SDH_COST_VAL) {
		now = ktime_to_timespec64(ktime_get_boottime());
		sd_cost(("%s: rw=%d, len=%d cost=%llus %ldus\n", __FUNCTION__,
			rw, nbytes, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
	}

	return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
1116
#ifdef BCMSDIOH_TXGLOM
/* Transfer a chain of packets (tx/rx glom) to/from the device.
 *
 * Two strategies, selected by sd->txglom_mode:
 *  - SDPCM_TXGLOM_MDESC: build a scatter-gather list over the packet chain
 *    and hand-craft CMD53 (SD_IO_RW_EXTENDED) requests directly via
 *    mmc_wait_for_req(), splitting into multiple commands when the chain
 *    exceeds the host's max request size or the 9-bit block count.
 *  - SDPCM_TXGLOM_CPY: coalesce the whole chain into one malloc'd bounce
 *    buffer and transfer it with the regular sdio_memcpy_*io helpers.
 *
 * fix_inc == SDIOH_DATA_FIX means a fixed (FIFO) device address; otherwise
 * 'addr' is advanced past each transfer.  Returns SDIOH_API_RC_SUCCESS or
 * SDIOH_API_RC_FAIL.
 */
static SDIOH_API_RC
sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
	uint addr, void *pkt)
{
	bool fifo = (fix_inc == SDIOH_DATA_FIX);
	int err_ret = 0;
	void *pnext;
	uint ttl_len, pkt_offset;
	uint blk_num;
	uint blk_size;
	uint max_blk_count;
	uint max_req_size;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	uint32 sg_count;
	struct sdio_func *sdio_func = sd->func[func];
	struct mmc_host *host = sdio_func->card->host;
	uint8 *localbuf = NULL;		/* bounce buffer, CPY mode only */
	uint local_plen = 0;		/* bytes accumulated in localbuf */
	uint pkt_len = 0;
	struct timespec64 now, before;

	sd_trace(("%s: Enter\n", __FUNCTION__));
	ASSERT(pkt);
	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);

	if (sd_msglevel & SDH_COST_VAL)
		before = ktime_to_timespec64(ktime_get_boottime());

	/* Per-CMD53 limits: host block-count limit capped by the SDIO spec's
	 * 9-bit block count, and the host's maximum request size.
	 */
	blk_size = sd->client_block_size[func];
	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
	max_req_size = min(max_blk_count * blk_size, host->max_req_size);

	pkt_offset = 0;
	pnext = pkt;

	ttl_len = 0;
	sg_count = 0;
	if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
		/* Outer loop: one iteration per CMD53 issued */
		while (pnext != NULL) {
			ttl_len = 0;
			sg_count = 0;
			memset(&mmc_req, 0, sizeof(struct mmc_request));
			memset(&mmc_cmd, 0, sizeof(struct mmc_command));
			memset(&mmc_dat, 0, sizeof(struct mmc_data));
			sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));

			/* Set up scatter-gather DMA descriptors. this loop is to find out the max
			 * data we can transfer with one command 53. blocks per command is limited by
			 * host max_req_size and 9-bit max block number. when the total length of this
			 * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
			 * commands (each transfer is still block aligned)
			 */
			while (pnext != NULL && ttl_len < max_req_size) {
				int pkt_len;
				int sg_data_size;
				uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);

				ASSERT(pdata != NULL);
				pkt_len = PKTLEN(sd->osh, pnext);
				sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
				/* sg_count is unlikely larger than the array size, and this is
				 * NOT something we can handle here, but in case it happens, PLEASE put
				 * a restriction on max tx/glom count (based on host->max_segs).
				 */
				if (sg_count >= ARRAYSIZE(sd->sg_list)) {
					sd_err(("%s: sg list entries exceed limit %d\n", __FUNCTION__, sg_count));
					return (SDIOH_API_RC_FAIL);
				}
				/* pkt_offset carries over from the previous CMD53 when a
				 * packet was split across two commands
				 */
				pdata += pkt_offset;

				sg_data_size = pkt_len - pkt_offset;
				if (sg_data_size > max_req_size - ttl_len)
					sg_data_size = max_req_size - ttl_len;
				/* some platforms put a restriction on the data size of each scatter-gather
				 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
				 * max_seg_size
				 */
				if (sg_data_size > host->max_seg_size)
					sg_data_size = host->max_seg_size;
				sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);

				ttl_len += sg_data_size;
				pkt_offset += sg_data_size;
				if (pkt_offset == pkt_len) {
					pnext = PKTNEXT(sd->osh, pnext);
					pkt_offset = 0;
				}
			}

			if (ttl_len % blk_size != 0) {
				sd_err(("%s, data length %d not aligned to block size %d\n",
					__FUNCTION__, ttl_len, blk_size));
				return SDIOH_API_RC_FAIL;
			}
			/* Build the CMD53 argument per the SDIO spec:
			 * bit31 R/W, bits30:28 function, bit27 block mode,
			 * bit26 incrementing address, bits25:9 register address,
			 * bits8:0 block count.
			 */
			blk_num = ttl_len / blk_size;
			mmc_dat.sg = sd->sg_list;
			mmc_dat.sg_len = sg_count;
			mmc_dat.blksz = blk_size;
			mmc_dat.blocks = blk_num;
			mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
			mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
			mmc_cmd.arg = write ? 1<<31 : 0;
			mmc_cmd.arg |= (func & 0x7) << 28;
			mmc_cmd.arg |= 1<<27;
			mmc_cmd.arg |= fifo ? 0 : 1<<26;
			mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
			mmc_cmd.arg |= blk_num & 0x1FF;
			mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
			mmc_req.cmd = &mmc_cmd;
			mmc_req.data = &mmc_dat;
			if (!fifo)
				addr += ttl_len;

			sdio_claim_host(sdio_func);
			mmc_set_data_timeout(&mmc_dat, sdio_func->card);
			mmc_wait_for_req(host, &mmc_req);
			sdio_release_host(sdio_func);

			err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
			if (0 != err_ret) {
				sd_err(("%s:CMD53 %s failed with code %d\n",
					__FUNCTION__, write ? "write" : "read", err_ret));
				return SDIOH_API_RC_FAIL;
			}
		}
	} else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) {
		/* First pass: total length of the chain, for the bounce buffer */
		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
			ttl_len += PKTLEN(sd->osh, pnext);
		}
		/* Claim host controller */
		sdio_claim_host(sd->func[func]);
		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
			uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
			pkt_len = PKTLEN(sd->osh, pnext);

			if (!localbuf) {
				localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
				if (localbuf == NULL) {
					sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
						__FUNCTION__, (write) ? "TX" : "RX"));
					goto txglomfail;
				}
			}

			/* Accumulate packets into the bounce buffer; only transfer
			 * once the last packet of the chain has been copied.
			 */
			bcopy(buf, (localbuf + local_plen), pkt_len);
			local_plen += pkt_len;
			if (PKTNEXT(sd->osh, pnext))
				continue;

			buf = localbuf;
			pkt_len = local_plen;
txglomfail:
			/* NOTE(review): on MALLOC failure the goto above lands here with
			 * 'buf'/'pkt_len' still describing only the current packet, so the
			 * transfer proceeds un-glommed rather than failing outright --
			 * presumably deliberate best-effort behavior; confirm before changing.
			 */
			/* Align Patch */
			if (!write || pkt_len < 32)
				pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
			else if (pkt_len % blk_size)
				pkt_len += blk_size - (pkt_len % blk_size);

			if ((write) && (!fifo))
				err_ret = sdio_memcpy_toio(
						sd->func[func],
						addr, buf, pkt_len);
			else if (write)
				err_ret = sdio_memcpy_toio(
						sd->func[func],
						addr, buf, pkt_len);
			else if (fifo)
				err_ret = sdio_readsb(
						sd->func[func],
						buf, addr, pkt_len);
			else
				err_ret = sdio_memcpy_fromio(
						sd->func[func],
						buf, addr, pkt_len);

#ifdef CUSTOMER_HW_ALLWINNER
			//AW judge sdio read write timeout, 1s
			if (sunxi_mmc_check_r1_ready(sd->func[func]->card->host, 1000) != 0)
				printk("%s data timeout.\n", __FUNCTION__);
#endif

			if (err_ret)
				sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
					__FUNCTION__,
					(write) ? "TX" : "RX",
					pnext, sg_count, addr, pkt_len, err_ret));
			else
				sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
					__FUNCTION__,
					(write) ? "TX" : "RX",
					pnext, sg_count, addr, pkt_len));

			if (!fifo)
				addr += pkt_len;
			sg_count ++;
		}
		sdio_release_host(sd->func[func]);
	} else {
		sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
		return SDIOH_API_RC_FAIL;
	}

	if (localbuf)
		MFREE(sd->osh, localbuf, ttl_len);

	if (sd_msglevel & SDH_COST_VAL) {
		now = ktime_to_timespec64(ktime_get_boottime());
		sd_cost(("%s: rw=%d, ttl_len=%d, cost=%llus %ldus\n", __FUNCTION__,
			write, ttl_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
	}

	sd_trace(("%s: Exit\n", __FUNCTION__));
	return SDIOH_API_RC_SUCCESS;
}
#endif /* BCMSDIOH_TXGLOM */
1336
1337 static SDIOH_API_RC
sdioh_buffer_tofrom_bus(sdioh_info_t * sd,uint fix_inc,uint write,uint func,uint addr,uint8 * buf,uint len)1338 sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1339 uint addr, uint8 *buf, uint len)
1340 {
1341 bool fifo = (fix_inc == SDIOH_DATA_FIX);
1342 int err_ret = 0;
1343 struct timespec64 now, before;
1344
1345 sd_trace(("%s: Enter\n", __FUNCTION__));
1346 ASSERT(buf);
1347
1348 if (sd_msglevel & SDH_COST_VAL)
1349 before = ktime_to_timespec64(ktime_get_boottime());
1350
1351 /* NOTE:
1352 * For all writes, each packet length is aligned to 32 (or 4)
1353 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
1354 * is aligned to block boundary. If you want to align each packet to
1355 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
1356 *
1357 * For reads, the alignment is doen in sdioh_request_buffer.
1358 *
1359 */
1360 sdio_claim_host(sd->func[func]);
1361
1362 if ((write) && (!fifo))
1363 err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1364 else if (write)
1365 err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1366 else if (fifo)
1367 err_ret = sdio_readsb(sd->func[func], buf, addr, len);
1368 else
1369 err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
1370
1371 #ifdef CUSTOMER_HW_ALLWINNER
1372 //AW judge sdio read write timeout, 1s
1373 if (sunxi_mmc_check_r1_ready(sd->func[func]->card->host, 1000) != 0)
1374 printk("%s data timeout.\n", __FUNCTION__);
1375 #endif
1376
1377 sdio_release_host(sd->func[func]);
1378
1379 if (err_ret)
1380 sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
1381 (write) ? "TX" : "RX", buf, addr, len, err_ret));
1382 else
1383 sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
1384 (write) ? "TX" : "RX", buf, addr, len));
1385
1386 sd_trace(("%s: Exit\n", __FUNCTION__));
1387
1388 if (sd_msglevel & SDH_COST_VAL) {
1389 now = ktime_to_timespec64(ktime_get_boottime());
1390 sd_cost(("%s: rw=%d, len=%d cost=%llus %ldus\n", __FUNCTION__,
1391 write, len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
1392 }
1393
1394 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1395 }
1396
1397
/*
 * This function takes a buffer or packet, and fixes everything up so that in the
 * end, a DMA-able packet is created.
 *
 * A buffer does not have an associated packet pointer, and may or may not be aligned.
 * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
 * then all the packets in the chain must be properly aligned. If the packet data is not
 * aligned, then there may only be one packet, and in this case, it is copied to a new
 * aligned packet.
 *
 */
extern SDIOH_API_RC
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
{
	SDIOH_API_RC status;
	void *tmppkt;	/* aligned bounce packet, used only on the copy path */
	struct timespec64 now, before;

	sd_trace(("%s: Enter\n", __FUNCTION__));
	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);

	if (sd_msglevel & SDH_COST_VAL)
		before = ktime_to_timespec64(ktime_get_boottime());

	if (pkt) {
#ifdef BCMSDIOH_TXGLOM
		/* packet chain, only used for tx/rx glom, all packets length
		 * are aligned, total length is a block multiple
		 */
		if (PKTNEXT(sd->osh, pkt))
			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
#endif /* BCMSDIOH_TXGLOM */
		/* non-glom mode, ignore the buffer parameter and use the packet pointer
		 * (this shouldn't happen)
		 */
		buffer = PKTDATA(sd->osh, pkt);
		buf_len = PKTLEN(sd->osh, pkt);
	}

	ASSERT(buffer);

	/* buffer and length are aligned, use it directly so we can avoid memory copy */
	if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);

	sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
		__FUNCTION__, write, buffer, buf_len));

	/* otherwise, a memory copy is needed as the input buffer is not aligned */
	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
	if (tmppkt == NULL) {
		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
		return SDIOH_API_RC_FAIL;
	}

	/* writes: stage caller data into the aligned packet before the transfer */
	if (write)
		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);

	/* transfer length is rounded up to the DMA alignment boundary; the
	 * bounce packet was allocated with block-size headroom to allow this
	 */
	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));

	/* reads: copy only the caller-requested length back out */
	if (!write)
		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);

	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);

	if (sd_msglevel & SDH_COST_VAL) {
		now = ktime_to_timespec64(ktime_get_boottime());
		sd_cost(("%s: len=%d cost=%llus %ldus\n", __FUNCTION__,
			buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
	}

	return status;
}
1474
1475 /* this function performs "abort" for both of host & device */
1476 extern int
sdioh_abort(sdioh_info_t * sd,uint func)1477 sdioh_abort(sdioh_info_t *sd, uint func)
1478 {
1479 #if defined(MMC_SDIO_ABORT)
1480 char t_func = (char) func;
1481 #endif /* defined(MMC_SDIO_ABORT) */
1482 sd_trace(("%s: Enter\n", __FUNCTION__));
1483
1484 #if defined(MMC_SDIO_ABORT)
1485 /* issue abort cmd52 command through F1 */
1486 sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
1487 #endif /* defined(MMC_SDIO_ABORT) */
1488
1489 sd_trace(("%s: Exit\n", __FUNCTION__));
1490 return SDIOH_API_RC_SUCCESS;
1491 }
1492
/* Reset and re-initialize the device */
/* Stub: the native MMC stack owns device reset, so there is nothing to do
 * here; always reports success.
 */
int sdioh_sdio_reset(sdioh_info_t *si)
{
	sd_trace(("%s: Enter\n", __FUNCTION__));
	sd_trace(("%s: Exit\n", __FUNCTION__));
	return SDIOH_API_RC_SUCCESS;
}
1500
/* Disable device interrupt */
/* Clears the client-interrupt bit in the soft interrupt mask; does not
 * touch hardware interrupt enables.
 */
void
sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
{
	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
	sd->intmask &= ~CLIENT_INTR;
}
1508
/* Enable device interrupt */
/* Sets the client-interrupt bit in the soft interrupt mask; counterpart of
 * sdioh_sdmmc_devintr_off().
 */
void
sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
{
	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
	sd->intmask |= CLIENT_INTR;
}
1516
1517 /* Read client card reg */
1518 int
sdioh_sdmmc_card_regread(sdioh_info_t * sd,int func,uint32 regaddr,int regsize,uint32 * data)1519 sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
1520 {
1521 if ((func == 0) || (regsize == 1)) {
1522 uint8 temp = 0;
1523
1524 sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1525 *data = temp;
1526 *data &= 0xff;
1527 sd_data(("%s: byte read data=0x%02x\n",
1528 __FUNCTION__, *data));
1529 } else {
1530 if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
1531 return BCME_SDIO_ERROR;
1532 }
1533
1534 if (regsize == 2)
1535 *data &= 0xffff;
1536
1537 sd_data(("%s: word read data=0x%08x\n",
1538 __FUNCTION__, *data));
1539 }
1540
1541 return SUCCESS;
1542 }
1543
#if !defined(OOB_INTR_ONLY)
/* bcmsdh_sdmmc interrupt handler */
/* In-band SDIO interrupt handler for F1: dispatches to the client handler
 * registered in sd->intr_handler.
 */
static void IRQHandler(struct sdio_func *func)
{
	sdioh_info_t *sd;

	sd = sdio_get_drvdata(func);

	ASSERT(sd != NULL);
	/* NOTE(review): the host is released here and re-claimed before
	 * returning -- presumably because the MMC core's SDIO IRQ thread
	 * calls handlers with the host claimed, and the client handler may
	 * itself claim the host; confirm against the kernel's sdio_irq code.
	 */
	sdio_release_host(sd->func[0]);

	if (sd->use_client_ints) {
		sd->intrcount++;
		ASSERT(sd->intr_handler);
		ASSERT(sd->intr_handler_arg);
		(sd->intr_handler)(sd->intr_handler_arg);
	} else {
		/* interrupt arrived before the client registered a handler */
		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));

		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
			__FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
	}

	sdio_claim_host(sd->func[0]);
}
1569
/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
/* F2 interrupts are serviced via the F1 handler above; this exists only so
 * the MMC core will enable the F2 interrupt (sdio_claim_irq requires a
 * handler).
 */
static void IRQHandlerF2(struct sdio_func *func)
{
	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
}
#endif /* !defined(OOB_INTR_ONLY) */
1576
#ifdef NOTUSED
/* Write client card reg */
/* Write a client (device) register: F0 accesses and single-byte writes use
 * a byte transfer; wider writes on other functions use a word transfer.
 * Always reports SUCCESS (transfer status is not checked, mirroring
 * sdioh_sdmmc_card_regread).
 */
static int
sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
{
	if ((func == 0) || (regsize == 1)) {
		uint8 temp;

		temp = data & 0xff;
		/* Bug fix: this write path previously passed SDIOH_READ, which
		 * would read the register (clobbering 'temp') instead of
		 * writing 'data' to it.
		 */
		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
		sd_data(("%s: byte write data=0x%02x\n",
			__FUNCTION__, data));
	} else {
		if (regsize == 2)
			data &= 0xffff;

		/* Bug fix: likewise was SDIOH_READ here */
		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);

		sd_data(("%s: word write data=0x%08x\n",
			__FUNCTION__, data));
	}

	return SUCCESS;
}
#endif /* NOTUSED */
1602
/* Two-stage (re)start of the SDIO host interface.
 *
 * stage 0: the chip was power-cycled, so re-enumerate via sdio_reset_comm(),
 *          restore block sizes for F1/F2, and re-enable the functions.
 * stage 1 (any non-zero): firmware download is complete, so it is now safe
 *          to hook up interrupts (in-band IRQ claim or out-of-band enable).
 * Always returns 0 except when sdio_reset_comm() fails.
 */
int
sdioh_start(sdioh_info_t *sd, int stage)
{
	int ret;

	if (!sd) {
		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
		return (0);
	}

	/* Need to do this in stages as we can't enable the interrupt till
	   downloading of the firmware is complete, otherwise polling
	   sdio access will come in the way
	*/
	if (sd->func[0]) {
		if (stage == 0) {
			/* Since the power to the chip is killed, we will have
			   to re-enumerate the device again. Set the block size
			   and enable function 1 in preparation for
			   downloading the code
			*/
			/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
			   2.6.27. The implementation prior to that is buggy, and needs broadcom's
			   patch for it
			*/
			if ((ret = sdio_reset_comm(sd->func[0]->card))) {
				sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
				return ret;
			}
			else {
				sd->num_funcs = 2;
				sd->sd_blockmode = TRUE;
				sd->use_client_ints = TRUE;
				sd->client_block_size[0] = 64;

				if (sd->func[1]) {
					/* Claim host controller */
					sdio_claim_host(sd->func[1]);

					sd->client_block_size[1] = 64;
					ret = sdio_set_block_size(sd->func[1], 64);
					if (ret) {
						sd_err(("bcmsdh_sdmmc: Failed to set F1 "
							"blocksize(%d)\n", ret));
					}

					/* Release host controller F1 */
					sdio_release_host(sd->func[1]);
				}

				if (sd->func[2]) {
					/* Claim host controller F2 */
					sdio_claim_host(sd->func[2]);

					sd->client_block_size[2] = sd_f2_blocksize;
					printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
					ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
					if (ret) {
						sd_err(("bcmsdh_sdmmc: Failed to set F2 "
							"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
					}

					/* Release host controller F2 */
					sdio_release_host(sd->func[2]);
				}

				sdioh_sdmmc_card_enablefuncs(sd);
			}
		} else {
#if !defined(OOB_INTR_ONLY)
			sdio_claim_host(sd->func[0]);
			/* F2 gets the dummy handler (F2 IRQs are serviced via F1) */
			if (sd->func[2])
				sdio_claim_irq(sd->func[2], IRQHandlerF2);
			if (sd->func[1])
				sdio_claim_irq(sd->func[1], IRQHandler);
			sdio_release_host(sd->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
			sdioh_enable_func_intr(sd);
#endif
			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
#endif /* !defined(OOB_INTR_ONLY) */
		}
	}
	else
		sd_err(("%s Failed\n", __FUNCTION__));

	return (0);
}
1692
1693 int
sdioh_stop(sdioh_info_t * sd)1694 sdioh_stop(sdioh_info_t *sd)
1695 {
1696 /* MSM7201A Android sdio stack has bug with interrupt
1697 So internaly within SDIO stack they are polling
1698 which cause issue when device is turned off. So
1699 unregister interrupt with SDIO stack to stop the
1700 polling
1701 */
1702 if (sd->func[0]) {
1703 #if !defined(OOB_INTR_ONLY)
1704 sdio_claim_host(sd->func[0]);
1705 if (sd->func[1])
1706 sdio_release_irq(sd->func[1]);
1707 if (sd->func[2])
1708 sdio_release_irq(sd->func[2]);
1709 sdio_release_host(sd->func[0]);
1710 #else /* defined(OOB_INTR_ONLY) */
1711 #if defined(HW_OOB)
1712 sdioh_disable_func_intr(sd);
1713 #endif
1714 bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
1715 #endif /* !defined(OOB_INTR_ONLY) */
1716 }
1717 else
1718 sd_err(("%s Failed\n", __FUNCTION__));
1719 return (0);
1720 }
1721
/* Stub: no host lock to wait for with the native MMC stack; always reports
 * lock-free (1).
 */
int
sdioh_waitlockfree(sdioh_info_t *sd)
{
	return (1);
}
1727
1728
/* Stub: GPIO output-enable is not supported by this host glue */
SDIOH_API_RC
sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
{
	return SDIOH_API_RC_FAIL;
}
1734
/* Stub: GPIO output control is not supported by this host glue */
SDIOH_API_RC
sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
{
	return SDIOH_API_RC_FAIL;
}
1740
/* Stub: GPIO input is not supported by this host glue; always reads FALSE */
bool
sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
{
	return FALSE;
}
1746
/* Stub: GPIO init is not supported by this host glue */
SDIOH_API_RC
sdioh_gpio_init(sdioh_info_t *sd)
{
	return SDIOH_API_RC_FAIL;
}
1752
/* Return the host controller's current SD clock rate in Hz.
 * mmc_host_clk_rate() only exists in kernels [3.4, 4.4), so outside that
 * range this reports 0 (rate unknown/unsupported).
 */
uint
sdmmc_get_clock_rate(sdioh_info_t *sd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
	return 0;
#else
	struct sdio_func *sdio_func = sd->func[0];
	struct mmc_host *host = sdio_func->card->host;
	return mmc_host_clk_rate(host);
#endif
}
1764
1765
/* Set the host controller's SD clock to 'hz', clamped to the host's
 * [f_min, f_max] range.  Drives the host's set_ios() directly, bracketed
 * by mmc_host_clk_hold/release.  No-op outside kernels [3.4, 4.4), which
 * lack the mmc_host_clk_* API.
 */
void
sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
	return;
#else
	struct sdio_func *sdio_func = sd->func[0];
	struct mmc_host *host = sdio_func->card->host;
	struct mmc_ios *ios = &host->ios;

	/* keep the host clock gated on while we reprogram it */
	mmc_host_clk_hold(host);
	DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
	if (hz < host->f_min) {
		DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
		hz = host->f_min;
	}

	if (hz > host->f_max) {
		DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
		hz = host->f_max;
	}
	ios->clock = hz;
	host->ops->set_ios(host, ios);
	DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
	mmc_host_clk_release(host);
#endif
}
1793
1794 void
sdmmc_set_clock_divisor(sdioh_info_t * sd,uint sd_div)1795 sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
1796 {
1797 uint hz;
1798 uint old_div = sdmmc_get_clock_rate(sd);
1799 if (old_div == sd_div) {
1800 return;
1801 }
1802
1803 hz = sd->sd_clk_rate / sd_div;
1804 sdmmc_set_clock_rate(sd, hz);
1805 }
1806