// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

struct mmc_op_cond_busy_data {
	struct mmc_host *host;
	u32 ocr;
	struct mmc_command *cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
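
/*
 * Usage sketch (hypothetical caller, not part of this file): read the card
 * status and check the current state. Assumes a valid, claimed card.
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		pr_debug("card is in the transfer state\n");
 */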

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}
EXPORT_SYMBOL_GPL(mmc_select_card);

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
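
/*
 * Example (assumption: the standard "dsr" device tree property): host->dsr
 * is typically populated when the host node is parsed, from a fragment
 * such as:
 *
 *	&mmc0 {
 *		dsr = <0x0404>;
 *	};
 *
 * The value ends up in bits [31:16] of the CMD4 argument above; the low
 * 16 bits are stuff bits.
 */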

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	u32 ocr = data->ocr;
	int err = 0;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
				  MMC_OP_COND_TIMEOUT_MS,
				  &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
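
/*
 * Usage sketch (mirroring the core init path): CMD1 is first issued with a
 * zero argument to probe the card's OCR, then with the negotiated voltage
 * window (plus the sector-addressing bit) to start initialization.
 *
 *	u32 ocr, rocr;
 *
 *	if (!mmc_send_op_cond(host, 0, &ocr))
 *		mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 */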

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must provide a DMA-capable buffer for @buf, or an
 * on-stack buffer (at the cost of some overhead in the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				 512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
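
/*
 * Usage sketch (hypothetical caller): mmc_get_ext_csd() allocates the
 * 512-byte buffer on success, so the caller owns it and must kfree() it.
 *
 *	u8 *ext_csd;
 *
 *	if (!mmc_get_ext_csd(card, &ext_csd)) {
 *		u8 rev = ext_csd[EXT_CSD_REV];
 *		kfree(ext_csd);
 *	}
 */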

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
			unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	int err;
	unsigned long timeout;
	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
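
/*
 * Usage sketch (hypothetical): __mmc_poll_for_busy() accepts any callback
 * that reports busy state, so a driver-specific condition can reuse the
 * same exponential-backoff loop. The names below are illustrative only.
 *
 *	static int my_busy_cb(void *cb_data, bool *busy)
 *	{
 *		struct my_ctx *ctx = cb_data;
 *
 *		*busy = my_device_still_busy(ctx);
 *		return 0;
 *	}
 *
 *	err = __mmc_poll_for_busy(host, 0, 1000, &my_busy_cb, &ctx);
 */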

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy
	 * timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}
EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
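
/*
 * Usage sketch (modelled on mmc_send_hpi_cmd() below): the return value
 * tells the caller whether HW busy detection applies or SW polling is
 * still needed afterwards.
 *
 *	struct mmc_command cmd = { .opcode = MMC_STOP_TRANSMISSION, };
 *	bool use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
 *	int err = mmc_wait_for_cmd(host, &cmd, 0);
 *
 *	if (!err && !use_r1b_resp)
 *		err = mmc_poll_for_busy(card, timeout_ms, false, MMC_BUSY_HPI);
 */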

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *	timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		 unsigned int timeout_ms, unsigned char timing,
		 bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops
	 * and polling with CMD13 isn't allowed either, we need to rely on
	 * waiting out the stated timeout and trust that it is sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
	       unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);
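
/*
 * Usage sketch (mirroring a typical core caller): switch the bus width
 * byte in the EXT_CSD; the values are illustrative.
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_4, card->ext_csd.generic_cmd6_time);
 */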

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally
	 * completes in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
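
/*
 * Usage sketch (hypothetical host driver): a ->execute_tuning() callback
 * can step its sample phase until the pattern reads back cleanly. The
 * phase-setting helper and phase count are illustrative, not a real API.
 *
 *	static int my_execute_tuning(struct mmc_host *host, u32 opcode)
 *	{
 *		int phase;
 *
 *		for (phase = 0; phase < MY_NUM_PHASES; phase++) {
 *			my_set_sample_phase(host, phase);
 *			if (!mmc_send_tuning(host, opcode, NULL))
 *				return 0;
 *		}
 *		return -EIO;
 *	}
 */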

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/*
	 * DMA onto the stack is unsafe/nonportable, but callers to this
	 * routine would normally provide temporary on-stack buffers, so
	 * allocate a heap buffer instead.
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
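
/*
 * Usage sketch (mirroring the core init path): the bus test is tried
 * widest-first when probing bus widths the card cannot advertise.
 *
 *	if (!mmc_bus_test(card, MMC_BUS_WIDTH_8))
 *		pr_debug("8-bit bus works\n");
 *	else if (!mmc_bus_test(card, MMC_BUS_WIDTH_4))
 *		pr_debug("falling back to a 4-bit bus\n");
 */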

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then checks the card status until it
 * leaves the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards that have manual BKOPS
 * enabled and report an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
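
/*
 * Usage sketch (mirroring a typical core caller): command queueing is
 * temporarily disabled around operations that are illegal in CMDQ mode
 * (for example RPMB access), then re-enabled afterwards.
 *
 *	if (card->ext_csd.cmdq_en) {
 *		err = mmc_cmdq_disable(card);
 *		...
 *		err = mmc_cmdq_enable(card);
 *	}
 */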

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);