// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

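/**
 * __mmc_send_status - read the card status register using CMD13
 * @card: the MMC card to query
 * @status: where to store the raw 32-bit status word (may be NULL)
 * @retries: number of retries for the command
 *
 * Note that the returned status word is in "native" or SPI format depending
 * on the host; see the comment in the function body.
 */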
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

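/*
 * Issue CMD1 (SEND_OP_COND) and poll until the card signals that its power-up
 * sequence has finished, or roughly one second elapses (100 iterations with a
 * 10 ms delay). For non-SPI hosts the OCR from the last response is handed
 * back through the rocr pointer, including the sector-addressing capability
 * bit (30).
 */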
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must supply a DMA-capable buffer for @buf, or an
 * on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

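/**
 * mmc_get_ext_csd - read the 512-byte EXT_CSD register from the card
 * @card: card from which to read the EXT_CSD
 * @new_ext_csd: where to store the pointer to the newly allocated buffer
 *
 * On success, the caller takes ownership of the buffer and must kfree() it.
 */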
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

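/*
 * Check whether the card is currently busy, preferring the host's
 * ->card_busy() callback when available and falling back to polling the
 * status with CMD13. How the status word is interpreted depends on which
 * command caused the busy period, hence the busy_cmd argument.
 */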
static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
			   enum mmc_busy_cmd busy_cmd, bool *busy)
{
	struct mmc_host *host = card->host;
	u32 status = 0;
	int err;

	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(card, &status);
	if (retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(card->host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			       bool send_status, bool retry_crc_err,
			       enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	/*
	 * If we aren't allowed to poll with CMD13 and the host cannot poll
	 * via ->card_busy(), rely on waiting out the stated timeout instead.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

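/**
 * mmc_poll_for_busy - poll until the card stops signalling busy
 * @card: the MMC card to poll
 * @timeout_ms: maximum time to keep polling
 * @busy_cmd: the command that caused the busy period
 *
 * Polls via ->card_busy() or CMD13, backing off exponentially from 32 us up
 * to about 32 ms between samples to avoid hogging the CPU.
 */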
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      enum mmc_busy_cmd busy_cmd)
{
	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *	timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if a CRC error occurs while polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = true;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

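	/*
	 * The CMD6 argument packs the access mode into bits [25:24], the
	 * EXT_CSD byte index into [23:16], the value to write into [15:8]
	 * and the command set into [7:0].
	 */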
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* If SPI or we used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
				  MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
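
/*
 * Illustrative sketch (not used in this file): a typical single-byte EXT_CSD
 * write, e.g. turning on the card's cache, would look like:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL, 1,
 *			 card->ext_csd.generic_cmd6_time);
 */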
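
/**
 * mmc_send_tuning - send a tuning block and verify the returned pattern
 * @host: host to send the tuning command to
 * @opcode: the tuning command opcode (CMD19 for SD, CMD21 for eMMC HS200)
 * @cmd_error: where to store the command error, if any (may be NULL)
 *
 * Returns 0 when the block read back matches the reference pattern for the
 * current bus width, -EIO on a pattern mismatch.
 */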
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

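/**
 * mmc_abort_tuning - abort a pending eMMC tuning command using CMD12
 * @host: the host issuing the tuning command
 * @opcode: the opcode of the tuning command to abort
 */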
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification allows CMD12 to stop a tuning command, but
	 * the SD specification does not, so do nothing unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150 ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE: MMC_RSP_SPI_R1 is always correct here; the bus test
	 * commands use an R1 response plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

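/*
 * Run the bus test procedure: BUS_TEST_W (CMD19) sends a known pattern and
 * BUS_TEST_R (CMD14) reads back the card's reply, which is expected to be
 * the bitwise inverse of the pattern on every working data line.
 */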
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;

	/*
	 * Make sure the host's max_busy_timeout fits the needed timeout for
	 * HPI. In case it doesn't, let's instruct the host to avoid HW busy
	 * detection, by using a R1 response instead of R1B.
	 */
	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout_ms;
	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		use_r1b_resp = false;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt and polls the card status until the card
 * is out of the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately.
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * less urgent levels in an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_cache_enabled(card->host)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

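/*
 * Toggle the command queue engine by writing EXT_CSD_CMDQ_MODE_EN, and
 * mirror the new state in card->ext_csd.cmdq_en on success.
 */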
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

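/**
 * mmc_sanitize - physically erase unmapped regions on the card
 * @card: the MMC card to sanitize
 *
 * Writes EXT_CSD_SANITIZE_START and waits up to MMC_SANITIZE_TIMEOUT_MS. On
 * timeout, an HPI is attempted to abort the operation and bring the card
 * back to the transfer state.
 */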
int mmc_sanitize(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			 1, MMC_SANITIZE_TIMEOUT_MS);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);