// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

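/*
 * Fixed tuning block patterns, as defined by the eMMC/SD specifications,
 * that the card returns in response to the tuning command (CMD19/CMD21).
 * mmc_send_tuning() below compares the received block against these
 * reference patterns, selected by the current data bus width.
 */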
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

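/*
 * Issue CMD13 (SEND_STATUS) and return the card's R1 status word (or the
 * SPI-format status word on SPI hosts). A minimal, hypothetical caller
 * sketch:
 *
 *	u32 status;
 *	int err = __mmc_send_status(card, &status, MMC_CMD_RETRIES);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		...	(card is back in the transfer state)
 */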
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}
EXPORT_SYMBOL_GPL(mmc_select_card);

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16-bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

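/*
 * mmc_go_idle - reset the card to the idle state with CMD0 (GO_IDLE_STATE).
 * As the comment inside explains, non-SPI hosts must keep chip-select
 * inactive around CMD0 so the card is not accidentally switched into SPI
 * mode.
 */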
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

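/*
 * mmc_send_op_cond - issue CMD1 (SEND_OP_COND) and poll until the card
 * reports that its power-up sequence has completed. The loop below retries
 * up to 100 times with a 10 ms delay between attempts, i.e. roughly a one
 * second budget, before giving up with -ETIMEDOUT.
 */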
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

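/*
 * mmc_set_relative_addr - assign the card's relative card address (RCA)
 * with CMD3 (SET_RELATIVE_ADDR); the address comes from card->rca and is
 * carried in the upper 16 bits of the argument.
 */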
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to pass a DMA-capable buffer in @buf, or an
 * on-stack buffer (with some overhead in the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

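/*
 * In SPI mode the CSD and CID are not delivered in an R2 response but as a
 * 16-byte data block, transferred big-endian on the wire; read it into a
 * bounce buffer and convert it to host-order u32 words for the caller.
 */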
static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

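/*
 * mmc_busy_cb - the default polling callback used by mmc_poll_for_busy().
 * It prefers the host's ->card_busy() hook when one is provided (except
 * for MMC_BUSY_IO), and otherwise falls back to CMD13 status polling,
 * mapping the status word to an error according to the command being
 * waited on.
 */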
static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

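/*
 * __mmc_poll_for_busy - poll @busy_cb until the card stops signalling busy
 * or @timeout_ms expires. The polling interval backs off exponentially,
 * doubling from 32 us up to a cap of 32768 us, so long-running operations
 * don't hog the CPU.
 */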
int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}
EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry if a CRC error occurs while polling with CMD13
 *	@retries: number of retries
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI or HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

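/*
 * A typical use of mmc_switch() below is a single EXT_CSD byte write, e.g.
 * enabling the 8-bit bus. This is an illustrative sketch only; the real
 * bus-width negotiation lives elsewhere in the core and also verifies the
 * switch with a bus test:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time);
 */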
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

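/*
 * mmc_send_tuning - issue the tuning command (CMD19 for SD, CMD21 for eMMC
 * HS200/HS400) and compare the block returned by the card against the
 * reference pattern for the current bus width. A mismatch is reported as
 * -EIO, letting the host driver try its next sampling point.
 */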
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally
	 * completes within 40 executions of CMD19, and the timeout value
	 * should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150 ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

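/*
 * mmc_send_bus_test - run the eMMC bus test with BUS_TEST_W (CMD19) or
 * BUS_TEST_R (CMD14). The card echoes the written pattern back inverted on
 * the tested lines, which is why the read path checks that each byte of
 * test_buf XOR data_buf equals 0xff.
 */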
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* DMA onto the stack is unsafe/nonportable; callers would normally
	 * provide temporary on-stack buffers, so use a kmalloc'd bounce
	 * buffer here instead.
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt (HPI)
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt and poll the card status until it
 *	is out of the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 *	mmc_run_bkops - Run BKOPS for supported cards
 *	@card: MMC card to run BKOPS for
 *
 *	Run background operations synchronously for cards having manual BKOPS
 *	enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * less urgent levels in an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

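/*
 * mmc_cmdq_switch - enable or disable the eMMC command queue engine via the
 * EXT_CSD CMDQ_MODE_EN byte. The cached ext_csd.cmdq_en flag is updated on
 * success so callers can test it cheaply. A hypothetical usage sketch:
 *
 *	if (card->ext_csd.cmdq_support && !card->ext_csd.cmdq_en)
 *		err = mmc_cmdq_enable(card);
 */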
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

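/*
 * mmc_sanitize - start the EXT_CSD SANITIZE_START operation, which has the
 * device physically erase unmapped regions. A zero @timeout_ms falls back
 * to the 240 s default above, and a timed-out operation is aborted with HPI
 * to get the card back into the transfer state.
 */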
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);