1  /*
2   *  linux/drivers/mmc/core/core.c
3   *
4   *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5   *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6   *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7   *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8   *
9   * This program is free software; you can redistribute it and/or modify
10   * it under the terms of the GNU General Public License version 2 as
11   * published by the Free Software Foundation.
12   */
13  #include <linux/module.h>
14  #include <linux/init.h>
15  #include <linux/interrupt.h>
16  #include <linux/completion.h>
17  #include <linux/device.h>
18  #include <linux/delay.h>
19  #include <linux/pagemap.h>
20  #include <linux/err.h>
21  #include <linux/leds.h>
22  #include <linux/scatterlist.h>
23  #include <linux/log2.h>
24  #include <linux/regulator/consumer.h>
25  #include <linux/pm_runtime.h>
26  #include <linux/pm_wakeup.h>
27  #include <linux/suspend.h>
28  #include <linux/fault-inject.h>
29  #include <linux/random.h>
30  #include <linux/slab.h>
31  #include <linux/of.h>
32  
33  #include <linux/mmc/card.h>
34  #include <linux/mmc/host.h>
35  #include <linux/mmc/mmc.h>
36  #include <linux/mmc/sd.h>
37  #include <linux/mmc/slot-gpio.h>
38  
39  #define CREATE_TRACE_POINTS
40  #include <trace/events/mmc.h>
41  
42  #include "core.h"
43  #include "card.h"
44  #include "bus.h"
45  #include "host.h"
46  #include "sdio_bus.h"
47  #include "pwrseq.h"
48  
49  #include "mmc_ops.h"
50  #include "sd_ops.h"
51  #include "sdio_ops.h"
52  
53  /* If the device is not responding */
54  #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
55  
56  /* The max erase timeout, used when host->max_busy_timeout isn't specified */
57  #define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
58  
59  static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
60  
61  /*
62   * Enabling software CRCs on the data blocks can be a significant (30%)
63   * performance cost, and for other reasons may not always be desired.
64   * So we allow it to be disabled.
65   */
66  bool use_spi_crc = 1;
67  module_param(use_spi_crc, bool, 0);
68  
69  static int mmc_schedule_delayed_work(struct delayed_work *work,
70  				     unsigned long delay)
71  {
72  	/*
73  	 * We use the system_freezable_wq, because of two reasons.
74  	 * First, it allows several works (not the same work item) to be
75  	 * executed simultaneously. Second, the queue becomes frozen when
76  	 * userspace becomes frozen during system PM.
77  	 */
78  	return queue_delayed_work(system_freezable_wq, work, delay);
79  }
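/*
 * For example, card detection further below defers its rescan work through
 * this helper:
 *
 *	mmc_schedule_delayed_work(&host->detect, delay);
 */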
80  
81  #ifdef CONFIG_FAIL_MMC_REQUEST
82  
83  /*
84   * Internal function. Inject random data errors.
85   * If mmc_data is NULL no errors are injected.
86   */
87  static void mmc_should_fail_request(struct mmc_host *host,
88  				    struct mmc_request *mrq)
89  {
90  	struct mmc_command *cmd = mrq->cmd;
91  	struct mmc_data *data = mrq->data;
92  	static const int data_errors[] = {
93  		-ETIMEDOUT,
94  		-EILSEQ,
95  		-EIO,
96  	};
97  
98  	if (!data)
99  		return;
100  
101  	if (cmd->error || data->error ||
102  	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
103  		return;
104  
105  	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
106  	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
107  }
108  
109  #else /* CONFIG_FAIL_MMC_REQUEST */
110  
111  static inline void mmc_should_fail_request(struct mmc_host *host,
112  					   struct mmc_request *mrq)
113  {
114  }
115  
116  #endif /* CONFIG_FAIL_MMC_REQUEST */
117  
118  static inline void mmc_complete_cmd(struct mmc_request *mrq)
119  {
120  	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
121  		complete_all(&mrq->cmd_completion);
122  }
123  
124  void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
125  {
126  	if (!mrq->cap_cmd_during_tfr)
127  		return;
128  
129  	mmc_complete_cmd(mrq);
130  
131  	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
132  		 mmc_hostname(host), mrq->cmd->opcode);
133  }
134  EXPORT_SYMBOL(mmc_command_done);
135  
136  /**
137   *	mmc_request_done - finish processing an MMC request
138   *	@host: MMC host which completed request
139   *	@mrq: MMC request which completed
140   *
141   *	MMC drivers should call this function when they have completed
142   *	their processing of a request.
143   */
144  void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
145  {
146  	struct mmc_command *cmd = mrq->cmd;
147  	int err = cmd->error;
148  
149  	/* Flag re-tuning needed on CRC errors */
150  	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
151  	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
152  	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
153  	    (mrq->data && mrq->data->error == -EILSEQ) ||
154  	    (mrq->stop && mrq->stop->error == -EILSEQ)))
155  		mmc_retune_needed(host);
156  
157  	if (err && cmd->retries && mmc_host_is_spi(host)) {
158  		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
159  			cmd->retries = 0;
160  	}
161  
162  	if (host->ongoing_mrq == mrq)
163  		host->ongoing_mrq = NULL;
164  
165  	mmc_complete_cmd(mrq);
166  
167  	trace_mmc_request_done(host, mrq);
168  
169  	/*
170  	 * We list various conditions for the command to be considered
171  	 * properly done:
172  	 *
173  	 * - There was no error, OK fine then
174  	 * - We are not doing some kind of retry
175  	 * - The card was removed (...so just complete everything no matter
176  	 *   if there are errors or retries)
177  	 */
178  	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
179  		mmc_should_fail_request(host, mrq);
180  
181  		if (!host->ongoing_mrq)
182  			led_trigger_event(host->led, LED_OFF);
183  
184  		if (mrq->sbc) {
185  			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
186  				mmc_hostname(host), mrq->sbc->opcode,
187  				mrq->sbc->error,
188  				mrq->sbc->resp[0], mrq->sbc->resp[1],
189  				mrq->sbc->resp[2], mrq->sbc->resp[3]);
190  		}
191  
192  		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
193  			mmc_hostname(host), cmd->opcode, err,
194  			cmd->resp[0], cmd->resp[1],
195  			cmd->resp[2], cmd->resp[3]);
196  
197  		if (mrq->data) {
198  			pr_debug("%s:     %d bytes transferred: %d\n",
199  				mmc_hostname(host),
200  				mrq->data->bytes_xfered, mrq->data->error);
201  		}
202  
203  		if (mrq->stop) {
204  			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
205  				mmc_hostname(host), mrq->stop->opcode,
206  				mrq->stop->error,
207  				mrq->stop->resp[0], mrq->stop->resp[1],
208  				mrq->stop->resp[2], mrq->stop->resp[3]);
209  		}
210  	}
211  	/*
212  	 * Request starter must handle retries - see
213  	 * mmc_wait_for_req_done().
214  	 */
215  	if (mrq->done)
216  		mrq->done(mrq);
217  }
218  
219  EXPORT_SYMBOL(mmc_request_done);
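/*
 * Illustrative sketch (hypothetical driver, not part of this file): a host
 * controller driver typically calls mmc_request_done() from its completion
 * path, e.g. an interrupt handler, once the hardware has finished the
 * request. The foo_* names are made up for illustration:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *foo = dev_id;
 *		struct mmc_request *mrq = foo->mrq;
 *
 *		foo->mrq = NULL;
 *		mrq->cmd->error = foo_decode_status(foo);
 *		mmc_request_done(foo->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */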
220  
221  static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
222  {
223  	int err;
224  
225  	/* Assumes host controller has been runtime resumed by mmc_claim_host */
226  	err = mmc_retune(host);
227  	if (err) {
228  		mrq->cmd->error = err;
229  		mmc_request_done(host, mrq);
230  		return;
231  	}
232  
233  	/*
234  	 * For sdio rw commands we must wait for card busy otherwise some
235  	 * sdio devices won't work properly.
236  	 * And bypass I/O abort, reset and bus suspend operations.
237  	 */
238  	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
239  	    host->ops->card_busy) {
240  		int tries = 500; /* Wait approx 500ms at maximum */
241  
242  		while (host->ops->card_busy(host) && --tries)
243  			mmc_delay(1);
244  
245  		if (tries == 0) {
246  			mrq->cmd->error = -EBUSY;
247  			mmc_request_done(host, mrq);
248  			return;
249  		}
250  	}
251  
252  	if (mrq->cap_cmd_during_tfr) {
253  		host->ongoing_mrq = mrq;
254  		/*
255  		 * Retry path could come through here without having waited on
256  		 * cmd_completion, so ensure it is reinitialised.
257  		 */
258  		reinit_completion(&mrq->cmd_completion);
259  	}
260  
261  	trace_mmc_request_start(host, mrq);
262  
263  	if (host->cqe_on)
264  		host->cqe_ops->cqe_off(host);
265  
266  	host->ops->request(host, mrq);
267  }
268  
269  static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
270  {
271  	if (mrq->sbc) {
272  		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
273  			 mmc_hostname(host), mrq->sbc->opcode,
274  			 mrq->sbc->arg, mrq->sbc->flags);
275  	}
276  
277  	if (mrq->cmd) {
278  		pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
279  			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
280  			 mrq->cmd->flags);
281  	}
282  
283  	if (mrq->data) {
284  		pr_debug("%s:     blksz %d blocks %d flags %08x "
285  			"tsac %d ms nsac %d\n",
286  			mmc_hostname(host), mrq->data->blksz,
287  			mrq->data->blocks, mrq->data->flags,
288  			mrq->data->timeout_ns / 1000000,
289  			mrq->data->timeout_clks);
290  	}
291  
292  	if (mrq->stop) {
293  		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
294  			 mmc_hostname(host), mrq->stop->opcode,
295  			 mrq->stop->arg, mrq->stop->flags);
296  	}
297  }
298  
299  static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
300  {
301  	unsigned int i, sz = 0;
302  	struct scatterlist *sg;
303  
304  	if (mrq->cmd) {
305  		mrq->cmd->error = 0;
306  		mrq->cmd->mrq = mrq;
307  		mrq->cmd->data = mrq->data;
308  	}
309  	if (mrq->sbc) {
310  		mrq->sbc->error = 0;
311  		mrq->sbc->mrq = mrq;
312  	}
313  	if (mrq->data) {
314  		if (mrq->data->blksz > host->max_blk_size ||
315  		    mrq->data->blocks > host->max_blk_count ||
316  		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
317  			return -EINVAL;
318  
319  		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
320  			sz += sg->length;
321  		if (sz != mrq->data->blocks * mrq->data->blksz)
322  			return -EINVAL;
323  
324  		mrq->data->error = 0;
325  		mrq->data->mrq = mrq;
326  		if (mrq->stop) {
327  			mrq->data->stop = mrq->stop;
328  			mrq->stop->error = 0;
329  			mrq->stop->mrq = mrq;
330  		}
331  	}
332  
333  	return 0;
334  }
335  
336  static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
337  {
338  	int err;
339  
340  	mmc_retune_hold(host);
341  
342  	if (mmc_card_removed(host->card))
343  		return -ENOMEDIUM;
344  
345  	mmc_mrq_pr_debug(host, mrq);
346  
347  	WARN_ON(!host->claimed);
348  
349  	err = mmc_mrq_prep(host, mrq);
350  	if (err)
351  		return err;
352  
353  	led_trigger_event(host->led, LED_FULL);
354  	__mmc_start_request(host, mrq);
355  
356  	return 0;
357  }
358  
359  /*
360   * mmc_wait_data_done() - done callback for data request
361   * @mrq: done data request
362   *
363   * Wakes up mmc context, passed as a callback to host controller driver
364   */
365  static void mmc_wait_data_done(struct mmc_request *mrq)
366  {
367  	struct mmc_context_info *context_info = &mrq->host->context_info;
368  
369  	context_info->is_done_rcv = true;
370  	wake_up_interruptible(&context_info->wait);
371  }
372  
373  static void mmc_wait_done(struct mmc_request *mrq)
374  {
375  	complete(&mrq->completion);
376  }
377  
378  static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
379  {
380  	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
381  
382  	/*
383  	 * If there is an ongoing transfer, wait for the command line to become
384  	 * available.
385  	 */
386  	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
387  		wait_for_completion(&ongoing_mrq->cmd_completion);
388  }
389  
390  /*
391   * __mmc_start_data_req() - starts data request
392   * @host: MMC host to start the request
393   * @mrq: data request to start
394   *
395   * Sets the done callback to be called when request is completed by the card.
396   * Starts data mmc request execution
397   * If an ongoing transfer is already in progress, wait for the command line
398   * to become available before sending another command.
399   */
400  static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
401  {
402  	int err;
403  
404  	mmc_wait_ongoing_tfr_cmd(host);
405  
406  	mrq->done = mmc_wait_data_done;
407  	mrq->host = host;
408  
409  	init_completion(&mrq->cmd_completion);
410  
411  	err = mmc_start_request(host, mrq);
412  	if (err) {
413  		mrq->cmd->error = err;
414  		mmc_complete_cmd(mrq);
415  		mmc_wait_data_done(mrq);
416  	}
417  
418  	return err;
419  }
420  
421  static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
422  {
423  	int err;
424  
425  	mmc_wait_ongoing_tfr_cmd(host);
426  
427  	init_completion(&mrq->completion);
428  	mrq->done = mmc_wait_done;
429  
430  	init_completion(&mrq->cmd_completion);
431  
432  	err = mmc_start_request(host, mrq);
433  	if (err) {
434  		mrq->cmd->error = err;
435  		mmc_complete_cmd(mrq);
436  		complete(&mrq->completion);
437  	}
438  
439  	return err;
440  }
441  
442  void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
443  {
444  	struct mmc_command *cmd;
445  
446  	while (1) {
447  		wait_for_completion(&mrq->completion);
448  
449  		cmd = mrq->cmd;
450  
451  		/*
452  		 * If host has timed out waiting for the sanitize
453  		 * to complete, card might be still in programming state
454  		 * so let's try to bring the card out of programming
455  		 * state.
456  		 */
457  		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
458  			if (!mmc_interrupt_hpi(host->card)) {
459  				pr_warn("%s: %s: Interrupted sanitize\n",
460  					mmc_hostname(host), __func__);
461  				cmd->error = 0;
462  				break;
463  			} else {
464  				pr_err("%s: %s: Failed to interrupt sanitize\n",
465  				       mmc_hostname(host), __func__);
466  			}
467  		}
468  		if (!cmd->error || !cmd->retries ||
469  		    mmc_card_removed(host->card))
470  			break;
471  
472  		mmc_retune_recheck(host);
473  
474  		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
475  			 mmc_hostname(host), cmd->opcode, cmd->error);
476  		cmd->retries--;
477  		cmd->error = 0;
478  		__mmc_start_request(host, mrq);
479  	}
480  
481  	mmc_retune_release(host);
482  }
483  EXPORT_SYMBOL(mmc_wait_for_req_done);
484  
485  /**
486   *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
487   *	@host: MMC host
488   *	@mrq: MMC request
489   *
490   *	mmc_is_req_done() is used with requests that have
491   *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
492   *	starting a request and before waiting for it to complete. That is,
493   *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
494   *	and before mmc_wait_for_req_done(). If it is called at other times the
495   *	result is not meaningful.
496   */
497  bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
498  {
499  	if (host->areq)
500  		return host->context_info.is_done_rcv;
501  	else
502  		return completion_done(&mrq->completion);
503  }
504  EXPORT_SYMBOL(mmc_is_req_done);
505  
506  /**
507   *	mmc_pre_req - Prepare for a new request
508   *	@host: MMC host to prepare command
509   *	@mrq: MMC request to prepare for
510   *
511   *	mmc_pre_req() is called in prior to mmc_start_req() to let
512   *	host prepare for the new request. Preparation of a request may be
513   *	performed while another request is running on the host.
514   */
515  static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
516  {
517  	if (host->ops->pre_req)
518  		host->ops->pre_req(host, mrq);
519  }
520  
521  /**
522   *	mmc_post_req - Post process a completed request
523   *	@host: MMC host to post process command
524   *	@mrq: MMC request to post process for
525   *	@err: Error, if non zero, clean up any resources made in pre_req
526   *
527   *	Let the host post process a completed request. Post processing of
528   *	a request may be performed while another request is running.
529   */
530  static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
531  			 int err)
532  {
533  	if (host->ops->post_req)
534  		host->ops->post_req(host, mrq, err);
535  }
536  
537  /**
538   * mmc_finalize_areq() - finalize an asynchronous request
539   * @host: MMC host to finalize any ongoing request on
540   *
541   * Returns the status of the ongoing asynchronous request, but
542   * MMC_BLK_SUCCESS if no request was going on.
543   */
544  static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
545  {
546  	struct mmc_context_info *context_info = &host->context_info;
547  	enum mmc_blk_status status;
548  
549  	if (!host->areq)
550  		return MMC_BLK_SUCCESS;
551  
552  	while (1) {
553  		wait_event_interruptible(context_info->wait,
554  				(context_info->is_done_rcv ||
555  				 context_info->is_new_req));
556  
557  		if (context_info->is_done_rcv) {
558  			struct mmc_command *cmd;
559  
560  			context_info->is_done_rcv = false;
561  			cmd = host->areq->mrq->cmd;
562  
563  			if (!cmd->error || !cmd->retries ||
564  			    mmc_card_removed(host->card)) {
565  				status = host->areq->err_check(host->card,
566  							       host->areq);
567  				break; /* return status */
568  			} else {
569  				mmc_retune_recheck(host);
570  				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
571  					mmc_hostname(host),
572  					cmd->opcode, cmd->error);
573  				cmd->retries--;
574  				cmd->error = 0;
575  				__mmc_start_request(host, host->areq->mrq);
576  				continue; /* wait for done/new event again */
577  			}
578  		}
579  
580  		return MMC_BLK_NEW_REQUEST;
581  	}
582  
583  	mmc_retune_release(host);
584  
585  	/*
586  	 * Check BKOPS urgency for each R1 response
587  	 */
588  	if (host->card && mmc_card_mmc(host->card) &&
589  	    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
590  	     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
591  	    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
592  		mmc_start_bkops(host->card, true);
593  	}
594  
595  	return status;
596  }
597  
598  /**
599   *	mmc_start_areq - start an asynchronous request
600   *	@host: MMC host to start command
601   *	@areq: asynchronous request to start
602   *	@ret_stat: out parameter for status
603   *
604   *	Start a new MMC custom command request for a host.
605   *	If there is an ongoing async request, wait for completion
606   *	of that request, then start the new one and return.
607   *	Does not wait for the new request to complete.
608   *
609   *      Returns the completed request, NULL in case of none completed.
610   *	Wait for an ongoing request (previously started) to complete and
611   *	return the completed request. If there is no ongoing request, NULL
612   *	is returned without waiting. NULL is not an error condition.
613   */
614  struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
615  				     struct mmc_async_req *areq,
616  				     enum mmc_blk_status *ret_stat)
617  {
618  	enum mmc_blk_status status;
619  	int start_err = 0;
620  	struct mmc_async_req *previous = host->areq;
621  
622  	/* Prepare a new request */
623  	if (areq)
624  		mmc_pre_req(host, areq->mrq);
625  
626  	/* Finalize previous request */
627  	status = mmc_finalize_areq(host);
628  	if (ret_stat)
629  		*ret_stat = status;
630  
631  	/* The previous request is still going on... */
632  	if (status == MMC_BLK_NEW_REQUEST)
633  		return NULL;
634  
635  	/* Fine so far, start the new request! */
636  	if (status == MMC_BLK_SUCCESS && areq)
637  		start_err = __mmc_start_data_req(host, areq->mrq);
638  
639  	/* Postprocess the old request at this point */
640  	if (host->areq)
641  		mmc_post_req(host, host->areq->mrq, 0);
642  
643  	/* Cancel a prepared request if it was not started. */
644  	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
645  		mmc_post_req(host, areq->mrq, -EINVAL);
646  
647  	if (status != MMC_BLK_SUCCESS)
648  		host->areq = NULL;
649  	else
650  		host->areq = areq;
651  
652  	return previous;
653  }
654  EXPORT_SYMBOL(mmc_start_areq);
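/*
 * Illustrative sketch (hypothetical caller): a block driver pipelines I/O by
 * handing the next prepared request to mmc_start_areq() and post-processing
 * whatever previously started request comes back; foo_complete_areq() is a
 * made-up helper standing in for that post-processing:
 *
 *	enum mmc_blk_status status;
 *	struct mmc_async_req *prev;
 *
 *	prev = mmc_start_areq(card->host, &next->areq, &status);
 *	if (status == MMC_BLK_NEW_REQUEST)
 *		return;
 *	if (prev)
 *		foo_complete_areq(prev, status);
 */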
655  
656  /**
657   *	mmc_wait_for_req - start a request and wait for completion
658   *	@host: MMC host to start command
659   *	@mrq: MMC request to start
660   *
661   *	Start a new MMC custom command request for a host, and wait
662   *	for the command to complete. In the case of 'cap_cmd_during_tfr'
663   *	requests, the transfer is ongoing and the caller can issue further
664   *	commands that do not use the data lines, and then wait by calling
665   *	mmc_wait_for_req_done().
666   *	Does not attempt to parse the response.
667   */
668  void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
669  {
670  	__mmc_start_req(host, mrq);
671  
672  	if (!mrq->cap_cmd_during_tfr)
673  		mmc_wait_for_req_done(host, mrq);
674  }
675  EXPORT_SYMBOL(mmc_wait_for_req);
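/*
 * Illustrative sketch (hypothetical caller): for a 'cap_cmd_during_tfr'
 * request, mmc_wait_for_req() returns once the command is done while the
 * data transfer keeps running, so further non-data commands can be issued
 * before finally waiting on the transfer (the foo_* call is made up):
 *
 *	mrq->cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, mrq);
 *	foo_send_non_data_cmd(host);
 *	mmc_wait_for_req_done(host, mrq);
 */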
676  
677  /**
678   *	mmc_wait_for_cmd - start a command and wait for completion
679   *	@host: MMC host to start command
680   *	@cmd: MMC command to start
681   *	@retries: maximum number of retries
682   *
683   *	Start a new MMC command for a host, and wait for the command
684   *	to complete.  Return any error that occurred while the command
685   *	was executing.  Do not attempt to parse the response.
686   */
687  int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
688  {
689  	struct mmc_request mrq = {};
690  
691  	WARN_ON(!host->claimed);
692  
693  	memset(cmd->resp, 0, sizeof(cmd->resp));
694  	cmd->retries = retries;
695  
696  	mrq.cmd = cmd;
697  	cmd->data = NULL;
698  
699  	mmc_wait_for_req(host, &mrq);
700  
701  	return cmd->error;
702  }
703  
704  EXPORT_SYMBOL(mmc_wait_for_cmd);
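/*
 * Illustrative sketch (hypothetical caller): issuing a simple command, here
 * CMD13 (SEND_STATUS), with the host already claimed:
 *
 *	struct mmc_command cmd = {};
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 */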
705  
706  /**
707   *	mmc_set_data_timeout - set the timeout for a data command
708   *	@data: data phase for command
709   *	@card: the MMC card associated with the data transfer
710   *
711   *	Computes the data timeout parameters according to the
712   *	correct algorithm given the card type.
713   */
714  void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
715  {
716  	unsigned int mult;
717  
718  	/*
719  	 * SDIO cards only define an upper 1 s limit on access.
720  	 */
721  	if (mmc_card_sdio(card)) {
722  		data->timeout_ns = 1000000000;
723  		data->timeout_clks = 0;
724  		return;
725  	}
726  
727  	/*
728  	 * SD cards use a 100 multiplier rather than 10
729  	 */
730  	mult = mmc_card_sd(card) ? 100 : 10;
731  
732  	/*
733  	 * Scale up the multiplier (and therefore the timeout) by
734  	 * the r2w factor for writes.
735  	 */
736  	if (data->flags & MMC_DATA_WRITE)
737  		mult <<= card->csd.r2w_factor;
738  
739  	data->timeout_ns = card->csd.taac_ns * mult;
740  	data->timeout_clks = card->csd.taac_clks * mult;
741  
742  	/*
743  	 * SD cards also have an upper limit on the timeout.
744  	 */
745  	if (mmc_card_sd(card)) {
746  		unsigned int timeout_us, limit_us;
747  
748  		timeout_us = data->timeout_ns / 1000;
749  		if (card->host->ios.clock)
750  			timeout_us += data->timeout_clks * 1000 /
751  				(card->host->ios.clock / 1000);
752  
753  		if (data->flags & MMC_DATA_WRITE)
754  			/*
755  			 * The MMC spec says: "It is strongly recommended
756  			 * for hosts to implement more than 500ms
757  			 * timeout value even if the card indicates
758  			 * the 250ms maximum busy length."  Even the
759  			 * previous value of 300ms is known to be
760  			 * insufficient for some cards.
761  			 */
762  			limit_us = 3000000;
763  		else
764  			limit_us = 100000;
765  
766  		/*
767  		 * SDHC cards always use these fixed values.
768  		 */
769  		if (timeout_us > limit_us) {
770  			data->timeout_ns = limit_us * 1000;
771  			data->timeout_clks = 0;
772  		}
773  
774  		/* assign limit value if invalid */
775  		if (timeout_us == 0)
776  			data->timeout_ns = limit_us * 1000;
777  	}
778  
779  	/*
780  	 * Some cards require longer data read timeout than indicated in CSD.
781  	 * Address this by setting the read timeout to a "reasonably high"
782  	 * value. For the cards tested, 600ms has proven enough. If necessary,
783  	 * this value can be increased if other problematic cards require this.
784  	 */
785  	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
786  		data->timeout_ns = 600000000;
787  		data->timeout_clks = 0;
788  	}
789  
790  	/*
791  	 * Some cards need very high timeouts if driven in SPI mode.
792  	 * The worst observed timeout was 900ms after writing a
793  	 * continuous stream of data until the internal logic
794  	 * overflowed.
795  	 */
796  	if (mmc_host_is_spi(card->host)) {
797  		if (data->flags & MMC_DATA_WRITE) {
798  			if (data->timeout_ns < 1000000000)
799  				data->timeout_ns = 1000000000;	/* 1s */
800  		} else {
801  			if (data->timeout_ns < 100000000)
802  				data->timeout_ns =  100000000;	/* 100ms */
803  		}
804  	}
805  }
806  EXPORT_SYMBOL(mmc_set_data_timeout);
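/*
 * Worked example (illustrative numbers): for an SD card write with
 * csd.taac_ns = 1000000 (1 ms) and csd.r2w_factor = 2, the multiplier is
 * 100 << 2 = 400, giving timeout_ns = 400 ms plus the timeout_clks
 * contribution. That is below the 3 s write limit above, so it is kept.
 */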
807  
808  /**
809   *	mmc_align_data_size - pads a transfer size to a more optimal value
810   *	@card: the MMC card associated with the data transfer
811   *	@sz: original transfer size
812   *
813   *	Pads the original data size with a number of extra bytes in
814   *	order to avoid controller bugs and/or performance hits
815   *	(e.g. some controllers revert to PIO for certain sizes).
816   *
817   *	Returns the improved size, which might be unmodified.
818   *
819   *	Note that this function is only relevant when issuing a
820   *	single scatter gather entry.
821   */
822  unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
823  {
824  	/*
825  	 * FIXME: We don't have a system for the controller to tell
826  	 * the core about its problems yet, so for now we just 32-bit
827  	 * align the size.
828  	 */
829  	sz = ((sz + 3) / 4) * 4;
830  
831  	return sz;
832  }
833  EXPORT_SYMBOL(mmc_align_data_size);
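/*
 * Worked example: a transfer size of 1001 bytes is padded to
 * ((1001 + 3) / 4) * 4 = 1004 bytes, while a size that is already a
 * multiple of 4, e.g. 1000, is returned unchanged.
 */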
834  
835  /**
836   *	__mmc_claim_host - exclusively claim a host
837   *	@host: mmc host to claim
838   *	@abort: whether or not the operation should be aborted
839   *
840   *	Claim a host for a set of operations.  If @abort is non null and
841   *	Claim a host for a set of operations.  If @abort is non-null and
842   *	dereferences to a non-zero value then this will return prematurely with
843   *	with the lock held otherwise.
844   */
845  int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
846  {
847  	DECLARE_WAITQUEUE(wait, current);
848  	unsigned long flags;
849  	int stop;
850  	bool pm = false;
851  
852  	might_sleep();
853  
854  	add_wait_queue(&host->wq, &wait);
855  	spin_lock_irqsave(&host->lock, flags);
856  	while (1) {
857  		set_current_state(TASK_UNINTERRUPTIBLE);
858  		stop = abort ? atomic_read(abort) : 0;
859  		if (stop || !host->claimed || host->claimer == current)
860  			break;
861  		spin_unlock_irqrestore(&host->lock, flags);
862  		schedule();
863  		spin_lock_irqsave(&host->lock, flags);
864  	}
865  	set_current_state(TASK_RUNNING);
866  	if (!stop) {
867  		host->claimed = 1;
868  		host->claimer = current;
869  		host->claim_cnt += 1;
870  		if (host->claim_cnt == 1)
871  			pm = true;
872  	} else
873  		wake_up(&host->wq);
874  	spin_unlock_irqrestore(&host->lock, flags);
875  	remove_wait_queue(&host->wq, &wait);
876  
877  	if (pm)
878  		pm_runtime_get_sync(mmc_dev(host));
879  
880  	return stop;
881  }
882  EXPORT_SYMBOL(__mmc_claim_host);
883  
884  /**
885   *	mmc_release_host - release a host
886   *	@host: mmc host to release
887   *
888   *	Release a MMC host, allowing others to claim the host
889   *	for their operations.
890   */
891  void mmc_release_host(struct mmc_host *host)
892  {
893  	unsigned long flags;
894  
895  	WARN_ON(!host->claimed);
896  
897  	spin_lock_irqsave(&host->lock, flags);
898  	if (--host->claim_cnt) {
899  		/* Release for nested claim */
900  		spin_unlock_irqrestore(&host->lock, flags);
901  	} else {
902  		host->claimed = 0;
903  		host->claimer = NULL;
904  		spin_unlock_irqrestore(&host->lock, flags);
905  		wake_up(&host->wq);
906  		pm_runtime_mark_last_busy(mmc_dev(host));
907  		pm_runtime_put_autosuspend(mmc_dev(host));
908  	}
909  }
910  EXPORT_SYMBOL(mmc_release_host);
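/*
 * Illustrative sketch (hypothetical caller): the usual pairing is to claim
 * the host around a sequence of operations and release it afterwards:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 *
 * mmc_get_card()/mmc_put_card() below additionally take and drop a runtime
 * PM reference on the card device.
 */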
911  
912  /*
913   * This is a helper function, which fetches a runtime pm reference for the
914   * card device and also claims the host.
915   */
916  void mmc_get_card(struct mmc_card *card)
917  {
918  	pm_runtime_get_sync(&card->dev);
919  	mmc_claim_host(card->host);
920  }
921  EXPORT_SYMBOL(mmc_get_card);
922  
923  /*
924   * This is a helper function, which releases the host and drops the runtime
925   * pm reference for the card device.
926   */
927  void mmc_put_card(struct mmc_card *card)
928  {
929  	mmc_release_host(card->host);
930  	pm_runtime_mark_last_busy(&card->dev);
931  	pm_runtime_put_autosuspend(&card->dev);
932  }
933  EXPORT_SYMBOL(mmc_put_card);
934  
935  /*
936   * Internal function that does the actual ios call to the host driver,
937   * optionally printing some debug output.
938   */
939  static inline void mmc_set_ios(struct mmc_host *host)
940  {
941  	struct mmc_ios *ios = &host->ios;
942  
943  	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
944  		"width %u timing %u\n",
945  		 mmc_hostname(host), ios->clock, ios->bus_mode,
946  		 ios->power_mode, ios->chip_select, ios->vdd,
947  		 1 << ios->bus_width, ios->timing);
948  
949  	host->ops->set_ios(host, ios);
950  }
951  
952  /*
953   * Control chip select pin on a host.
954   */
955  void mmc_set_chip_select(struct mmc_host *host, int mode)
956  {
957  	host->ios.chip_select = mode;
958  	mmc_set_ios(host);
959  }
960  
961  /*
962   * Sets the host clock to the highest possible frequency that
963   * is below "hz".
964   */
965  void mmc_set_clock(struct mmc_host *host, unsigned int hz)
966  {
967  	WARN_ON(hz && hz < host->f_min);
968  
969  	if (hz > host->f_max)
970  		hz = host->f_max;
971  
972  	host->ios.clock = hz;
973  	mmc_set_ios(host);
974  }
975  
976  int mmc_execute_tuning(struct mmc_card *card)
977  {
978  	struct mmc_host *host = card->host;
979  	u32 opcode;
980  	int err;
981  
982  	if (!host->ops->execute_tuning)
983  		return 0;
984  
985  	if (host->cqe_on)
986  		host->cqe_ops->cqe_off(host);
987  
988  	if (mmc_card_mmc(card))
989  		opcode = MMC_SEND_TUNING_BLOCK_HS200;
990  	else
991  		opcode = MMC_SEND_TUNING_BLOCK;
992  
993  	err = host->ops->execute_tuning(host, opcode);
994  
995  	if (err)
996  		pr_err("%s: tuning execution failed: %d\n",
997  			mmc_hostname(host), err);
998  	else
999  		mmc_retune_enable(host);
1000  
1001  	return err;
1002  }
1003  
1004  /*
1005   * Change the bus mode (open drain/push-pull) of a host.
1006   */
1007  void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1008  {
1009  	host->ios.bus_mode = mode;
1010  	mmc_set_ios(host);
1011  }
1012  
1013  /*
1014   * Change data bus width of a host.
1015   */
1016  void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1017  {
1018  	host->ios.bus_width = width;
1019  	mmc_set_ios(host);
1020  }
1021  
1022  /*
1023   * Set initial state after a power cycle or a hw_reset.
1024   */
1025  void mmc_set_initial_state(struct mmc_host *host)
1026  {
1027  	if (host->cqe_on)
1028  		host->cqe_ops->cqe_off(host);
1029  
1030  	mmc_retune_disable(host);
1031  
1032  	if (mmc_host_is_spi(host))
1033  		host->ios.chip_select = MMC_CS_HIGH;
1034  	else
1035  		host->ios.chip_select = MMC_CS_DONTCARE;
1036  	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1037  	host->ios.bus_width = MMC_BUS_WIDTH_1;
1038  	host->ios.timing = MMC_TIMING_LEGACY;
1039  	host->ios.drv_type = 0;
1040  	host->ios.enhanced_strobe = false;
1041  
1042  	/*
1043  	 * Make sure we are in non-enhanced strobe mode before we
1044  	 * actually enable it in ext_csd.
1045  	 */
1046  	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1047  	     host->ops->hs400_enhanced_strobe)
1048  		host->ops->hs400_enhanced_strobe(host, &host->ios);
1049  
1050  	mmc_set_ios(host);
1051  }
1052  
1053  /**
1054   * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1055   * @vdd:	voltage (mV)
1056   * @low_bits:	prefer low bits in boundary cases
1057   *
1058   * This function returns the OCR bit number according to the provided @vdd
1059   * value. If conversion is not possible a negative errno value returned.
1060   *
1061   * Depending on the @low_bits flag the function prefers low or high OCR bits
1062   * on boundary voltages. For example,
1063   * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1064   * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1065   *
1066   * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1067   */
1068  static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1069  {
1070  	const int max_bit = ilog2(MMC_VDD_35_36);
1071  	int bit;
1072  
1073  	if (vdd < 1650 || vdd > 3600)
1074  		return -EINVAL;
1075  
1076  	if (vdd >= 1650 && vdd <= 1950)
1077  		return ilog2(MMC_VDD_165_195);
1078  
1079  	if (low_bits)
1080  		vdd -= 1;
1081  
1082  	/* Base 2000 mV, step 100 mV, bit's base 8. */
1083  	bit = (vdd - 2000) / 100 + 8;
1084  	if (bit > max_bit)
1085  		return max_bit;
1086  	return bit;
1087  }
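/*
 * Worked example: vdd = 3300 with @low_bits = true is first decremented to
 * 3299, giving bit = (3299 - 2000) / 100 + 8 = 20, i.e. ilog2(MMC_VDD_32_33);
 * with @low_bits = false, bit = (3300 - 2000) / 100 + 8 = 21, i.e.
 * ilog2(MMC_VDD_33_34), matching the kernel-doc above.
 */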
1088  
1089  /**
1090   * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1091   * @vdd_min:	minimum voltage value (mV)
1092   * @vdd_max:	maximum voltage value (mV)
1093   *
1094   * This function returns the OCR mask bits according to the provided @vdd_min
1095   * and @vdd_max values. If conversion is not possible the function returns 0.
1096   *
1097   * Notes wrt boundary cases:
1098   * This function sets the OCR bits for all boundary voltages, for example
1099   * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1100   * MMC_VDD_34_35 mask.
1101   */
1102  u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1103  {
1104  	u32 mask = 0;
1105  
1106  	if (vdd_max < vdd_min)
1107  		return 0;
1108  
1109  	/* Prefer high bits for the boundary vdd_max values. */
1110  	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1111  	if (vdd_max < 0)
1112  		return 0;
1113  
1114  	/* Prefer low bits for the boundary vdd_min values. */
1115  	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1116  	if (vdd_min < 0)
1117  		return 0;
1118  
1119  	/* Fill the mask, from max bit to min bit. */
1120  	while (vdd_max >= vdd_min)
1121  		mask |= 1 << vdd_max--;
1122  
1123  	return mask;
1124  }
1125  EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1126  
1127  #ifdef CONFIG_OF
1128  
1129  /**
1130   * mmc_of_parse_voltage - return mask of supported voltages
1131   * @np: The device node need to be parsed.
1132   * @mask: mask of voltages available for MMC/SD/SDIO
1133   *
1134   * Parse the "voltage-ranges" DT property, returning zero if it is not
1135   * found, negative errno if the voltage-range specification is invalid,
1136   * or one if the voltage-range is specified and successfully parsed.
1137   */
1138  int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1139  {
1140  	const u32 *voltage_ranges;
1141  	int num_ranges, i;
1142  
1143  	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1144  	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1145  	if (!voltage_ranges) {
1146  		pr_debug("%pOF: voltage-ranges unspecified\n", np);
1147  		return 0;
1148  	}
1149  	if (!num_ranges) {
1150  		pr_err("%pOF: voltage-ranges empty\n", np);
1151  		return -EINVAL;
1152  	}
1153  
1154  	for (i = 0; i < num_ranges; i++) {
1155  		const int j = i * 2;
1156  		u32 ocr_mask;
1157  
1158  		ocr_mask = mmc_vddrange_to_ocrmask(
1159  				be32_to_cpu(voltage_ranges[j]),
1160  				be32_to_cpu(voltage_ranges[j + 1]));
1161  		if (!ocr_mask) {
1162  			pr_err("%pOF: voltage-range #%d is invalid\n",
1163  				np, i);
1164  			return -EINVAL;
1165  		}
1166  		*mask |= ocr_mask;
1167  	}
1168  
1169  	return 1;
1170  }
1171  EXPORT_SYMBOL(mmc_of_parse_voltage);
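/*
 * Illustrative sketch (hypothetical caller): a host driver can translate a
 * "voltage-ranges" property such as <3300 3400> into an OCR mask at probe
 * time (pdev stands in for the driver's platform device):
 *
 *	u32 ocr_mask = 0;
 *	int ret;
 *
 *	ret = mmc_of_parse_voltage(pdev->dev.of_node, &ocr_mask);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 1)
 *		mmc->ocr_avail = ocr_mask;
 */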
1172  
1173  #endif /* CONFIG_OF */
1174  
1175  static int mmc_of_get_func_num(struct device_node *node)
1176  {
1177  	u32 reg;
1178  	int ret;
1179  
1180  	ret = of_property_read_u32(node, "reg", &reg);
1181  	if (ret < 0)
1182  		return ret;
1183  
1184  	return reg;
1185  }
1186  
1187  struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1188  		unsigned func_num)
1189  {
1190  	struct device_node *node;
1191  
1192  	if (!host->parent || !host->parent->of_node)
1193  		return NULL;
1194  
1195  	for_each_child_of_node(host->parent->of_node, node) {
1196  		if (mmc_of_get_func_num(node) == func_num)
1197  			return node;
1198  	}
1199  
1200  	return NULL;
1201  }
1202  
1203  #ifdef CONFIG_REGULATOR
1204  
1205  /**
1206   * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1207   * @vdd_bit:	OCR bit number
1208   * @min_uV:	minimum voltage value (mV)
1209   * @max_uV:	maximum voltage value (mV)
1210   *
1211   * This function returns the voltage range according to the provided OCR
1212   * bit number. If conversion is not possible a negative errno value returned.
1213   */
1214  static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1215  {
1216  	int		tmp;
1217  
1218  	if (!vdd_bit)
1219  		return -EINVAL;
1220  
1221  	/*
1222  	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1223  	 * bits this regulator doesn't quite support ... don't
1224  	 * be too picky, most cards and regulators are OK with
1225  	 * a 0.1V range goof (it's a small error percentage).
1226  	 */
1227  	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1228  	if (tmp == 0) {
1229  		*min_uV = 1650 * 1000;
1230  		*max_uV = 1950 * 1000;
1231  	} else {
1232  		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
1233  		*max_uV = *min_uV + 100 * 1000;
1234  	}
1235  
1236  	return 0;
1237  }
1238  
1239  /**
1240   * mmc_regulator_get_ocrmask - return mask of supported voltages
1241   * @supply: regulator to use
1242   *
1243   * This returns either a negative errno, or a mask of voltages that
1244   * can be provided to MMC/SD/SDIO devices using the specified voltage
1245   * regulator.  This would normally be called before registering the
1246   * MMC host adapter.
1247   */
1248  int mmc_regulator_get_ocrmask(struct regulator *supply)
1249  {
1250  	int			result = 0;
1251  	int			count;
1252  	int			i;
1253  	int			vdd_uV;
1254  	int			vdd_mV;
1255  
1256  	count = regulator_count_voltages(supply);
1257  	if (count < 0)
1258  		return count;
1259  
1260  	for (i = 0; i < count; i++) {
1261  		vdd_uV = regulator_list_voltage(supply, i);
1262  		if (vdd_uV <= 0)
1263  			continue;
1264  
1265  		vdd_mV = vdd_uV / 1000;
1266  		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1267  	}
1268  
1269  	if (!result) {
1270  		vdd_uV = regulator_get_voltage(supply);
1271  		if (vdd_uV <= 0)
1272  			return vdd_uV;
1273  
1274  		vdd_mV = vdd_uV / 1000;
1275  		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1276  	}
1277  
1278  	return result;
1279  }
1280  EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1281  
1282  /**
1283   * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1284   * @mmc: the host to regulate
1285   * @supply: regulator to use
1286   * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1287   *
1288   * Returns zero on success, else negative errno.
1289   *
1290   * MMC host drivers may use this to enable or disable a regulator using
1291   * a particular supply voltage.  This would normally be called from the
1292   * set_ios() method.
1293   */
1294  int mmc_regulator_set_ocr(struct mmc_host *mmc,
1295  			struct regulator *supply,
1296  			unsigned short vdd_bit)
1297  {
1298  	int			result = 0;
1299  	int			min_uV, max_uV;
1300  
1301  	if (vdd_bit) {
1302  		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1303  
1304  		result = regulator_set_voltage(supply, min_uV, max_uV);
1305  		if (result == 0 && !mmc->regulator_enabled) {
1306  			result = regulator_enable(supply);
1307  			if (!result)
1308  				mmc->regulator_enabled = true;
1309  		}
1310  	} else if (mmc->regulator_enabled) {
1311  		result = regulator_disable(supply);
1312  		if (result == 0)
1313  			mmc->regulator_enabled = false;
1314  	}
1315  
1316  	if (result)
1317  		dev_err(mmc_dev(mmc),
1318  			"could not set regulator OCR (%d)\n", result);
1319  	return result;
1320  }
1321  EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
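/*
 * Illustrative sketch (hypothetical driver): as noted above, this is
 * normally called from a host driver's ->set_ios() callback; the foo_*
 * name is made up:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 *		...
 *	}
 */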
1322  
1323  static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1324  						  int min_uV, int target_uV,
1325  						  int max_uV)
1326  {
1327  	/*
1328  	 * Check if supported first to avoid errors since we may try several
1329  	 * signal levels during power up and don't want to show errors.
1330  	 */
1331  	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1332  		return -EINVAL;
1333  
1334  	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1335  					     max_uV);
1336  }
1337  
1338  /**
1339   * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1340   *
1341   * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1342   * That will match the behavior of old boards where VQMMC and VMMC were supplied
1343   * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1344   * SD card spec also define VQMMC in terms of VMMC.
1345   * If this is not possible we'll try the full 2.7-3.6V of the spec.
1346   *
1347   * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1348   * requested voltage.  This is definitely a good idea for UHS where there's a
1349   * separate regulator on the card that's trying to make 1.8V and it's best if
1350   * we match.
1351   *
1352   * This function is expected to be used by a controller's
1353   * start_signal_voltage_switch() function.
1354   */
1355  int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1356  {
1357  	struct device *dev = mmc_dev(mmc);
1358  	int ret, volt, min_uV, max_uV;
1359  
1360  	/* If no vqmmc supply then we can't change the voltage */
1361  	if (IS_ERR(mmc->supply.vqmmc))
1362  		return -EINVAL;
1363  
1364  	switch (ios->signal_voltage) {
1365  	case MMC_SIGNAL_VOLTAGE_120:
1366  		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1367  						1100000, 1200000, 1300000);
1368  	case MMC_SIGNAL_VOLTAGE_180:
1369  		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1370  						1700000, 1800000, 1950000);
1371  	case MMC_SIGNAL_VOLTAGE_330:
1372  		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1373  		if (ret < 0)
1374  			return ret;
1375  
1376  		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1377  			__func__, volt, max_uV);
1378  
1379  		min_uV = max(volt - 300000, 2700000);
1380  		max_uV = min(max_uV + 200000, 3600000);
1381  
1382  		/*
1383  		 * Due to a limitation in the current implementation of
1384  		 * regulator_set_voltage_triplet() which is taking the lowest
1385  		 * voltage possible if below the target, search for a suitable
1386  		 * voltage in two steps and try to stay close to vmmc
1387  		 * with a 0.3V tolerance at first.
1388  		 */
1389  		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1390  						min_uV, volt, max_uV))
1391  			return 0;
1392  
1393  		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1394  						2700000, volt, 3600000);
1395  	default:
1396  		return -EINVAL;
1397  	}
1398  }
1399  EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1400  
1401  #endif /* CONFIG_REGULATOR */
1402  
1403  int mmc_regulator_get_supply(struct mmc_host *mmc)
1404  {
1405  	struct device *dev = mmc_dev(mmc);
1406  	int ret;
1407  
1408  	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1409  	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1410  
1411  	if (IS_ERR(mmc->supply.vmmc)) {
1412  		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1413  			return -EPROBE_DEFER;
1414  		dev_dbg(dev, "No vmmc regulator found\n");
1415  	} else {
1416  		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1417  		if (ret > 0)
1418  			mmc->ocr_avail = ret;
1419  		else
1420  			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1421  	}
1422  
1423  	if (IS_ERR(mmc->supply.vqmmc)) {
1424  		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1425  			return -EPROBE_DEFER;
1426  		dev_dbg(dev, "No vqmmc regulator found\n");
1427  	}
1428  
1429  	return 0;
1430  }
1431  EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
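/*
 * Illustrative sketch (hypothetical driver): this is typically called during
 * probe; the only error it returns is -EPROBE_DEFER, which should be
 * propagated, and a fallback OCR mask (example values below) may be set if
 * no vmmc regulator was found:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 */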
1432  
1433  /*
1434   * Mask off any voltages we don't support and select
1435   * the lowest voltage
1436   */
1437  u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1438  {
1439  	int bit;
1440  
1441  	/*
1442  	 * Sanity check the voltages that the card claims to
1443  	 * support.
1444  	 */
1445  	if (ocr & 0x7F) {
1446  		dev_warn(mmc_dev(host),
1447  		"card claims to support voltages below defined range\n");
1448  		ocr &= ~0x7F;
1449  	}
1450  
1451  	ocr &= host->ocr_avail;
1452  	if (!ocr) {
1453  		dev_warn(mmc_dev(host), "no support for card's volts\n");
1454  		return 0;
1455  	}
1456  
1457  	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1458  		bit = ffs(ocr) - 1;
1459  		ocr &= 3 << bit;
1460  		mmc_power_cycle(host, ocr);
1461  	} else {
1462  		bit = fls(ocr) - 1;
1463  		ocr &= 3 << bit;
1464  		if (bit != host->ios.vdd)
1465  			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1466  	}
1467  
1468  	return ocr;
1469  }
1470  
1471  int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1472  {
1473  	int err = 0;
1474  	int old_signal_voltage = host->ios.signal_voltage;
1475  
1476  	host->ios.signal_voltage = signal_voltage;
1477  	if (host->ops->start_signal_voltage_switch)
1478  		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1479  
1480  	if (err)
1481  		host->ios.signal_voltage = old_signal_voltage;
1482  
1483  	return err;
1484  
1485  }
1486  
1487  int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1488  {
1489  	struct mmc_command cmd = {};
1490  	int err = 0;
1491  	u32 clock;
1492  
1493  	/*
1494  	 * If we cannot switch voltages, return failure so the caller
1495  	 * can continue without UHS mode
1496  	 */
1497  	if (!host->ops->start_signal_voltage_switch)
1498  		return -EPERM;
1499  	if (!host->ops->card_busy)
1500  		pr_warn("%s: cannot verify signal voltage switch\n",
1501  			mmc_hostname(host));
1502  
1503  	cmd.opcode = SD_SWITCH_VOLTAGE;
1504  	cmd.arg = 0;
1505  	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1506  
1507  	err = mmc_wait_for_cmd(host, &cmd, 0);
1508  	if (err)
1509  		return err;
1510  
1511  	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1512  		return -EIO;
1513  
1514  	/*
1515  	 * The card should drive cmd and dat[0:3] low immediately
1516  	 * after the response of cmd11, but wait 1 ms to be sure
1517  	 */
1518  	mmc_delay(1);
1519  	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1520  		err = -EAGAIN;
1521  		goto power_cycle;
1522  	}
1523  	/*
1524  	 * During a signal voltage level switch, the clock must be gated
1525  	 * for 5 ms according to the SD spec
1526  	 */
1527  	clock = host->ios.clock;
1528  	host->ios.clock = 0;
1529  	mmc_set_ios(host);
1530  
1531  	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
1532  		/*
1533  		 * Voltages may not have been switched, but we've already
1534  		 * sent CMD11, so a power cycle is required anyway
1535  		 */
1536  		err = -EAGAIN;
1537  		goto power_cycle;
1538  	}
1539  
1540  	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1541  	mmc_delay(10);
1542  	host->ios.clock = clock;
1543  	mmc_set_ios(host);
1544  
1545  	/* Wait for at least 1 ms according to spec */
1546  	mmc_delay(1);
1547  
1548  	/*
1549  	 * Failure to switch is indicated by the card holding
1550  	 * dat[0:3] low
1551  	 */
1552  	if (host->ops->card_busy && host->ops->card_busy(host))
1553  		err = -EAGAIN;
1554  
1555  power_cycle:
1556  	if (err) {
1557  		pr_debug("%s: Signal voltage switch failed, "
1558  			"power cycling card\n", mmc_hostname(host));
1559  		mmc_power_cycle(host, ocr);
1560  	}
1561  
1562  	return err;
1563  }
1564  
1565  /*
1566   * Select timing parameters for host.
1567   */
1568  void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1569  {
1570  	host->ios.timing = timing;
1571  	mmc_set_ios(host);
1572  }
1573  
1574  /*
1575   * Select appropriate driver type for host.
1576   */
1577  void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1578  {
1579  	host->ios.drv_type = drv_type;
1580  	mmc_set_ios(host);
1581  }
1582  
1583  int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1584  			      int card_drv_type, int *drv_type)
1585  {
1586  	struct mmc_host *host = card->host;
1587  	int host_drv_type = SD_DRIVER_TYPE_B;
1588  
1589  	*drv_type = 0;
1590  
1591  	if (!host->ops->select_drive_strength)
1592  		return 0;
1593  
1594  	/* Use SD definition of driver strength for hosts */
1595  	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1596  		host_drv_type |= SD_DRIVER_TYPE_A;
1597  
1598  	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1599  		host_drv_type |= SD_DRIVER_TYPE_C;
1600  
1601  	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1602  		host_drv_type |= SD_DRIVER_TYPE_D;
1603  
1604  	/*
1605  	 * The drive strength that the hardware can support
1606  	 * depends on the board design.  Pass the appropriate
1607  	 * information and let the hardware specific code
1608  	 * return what is possible given the options
1609  	 */
1610  	return host->ops->select_drive_strength(card, max_dtr,
1611  						host_drv_type,
1612  						card_drv_type,
1613  						drv_type);
1614  }
1615  
1616  /*
1617   * Apply power to the MMC stack.  This is a two-stage process.
1618   * First, we enable power to the card without the clock running.
1619   * We then wait a bit for the power to stabilise.  Finally,
1620   * enable the bus drivers and clock to the card.
1621   *
1622   * We must _NOT_ enable the clock prior to power stablising.
1623   *
1624   * If a host does all the power sequencing itself, ignore the
1625   * initial MMC_POWER_UP stage.
1626   */
1627  void mmc_power_up(struct mmc_host *host, u32 ocr)
1628  {
1629  	if (host->ios.power_mode == MMC_POWER_ON)
1630  		return;
1631  
1632  	mmc_pwrseq_pre_power_on(host);
1633  
1634  	host->ios.vdd = fls(ocr) - 1;
1635  	host->ios.power_mode = MMC_POWER_UP;
1636  	/* Set initial state and call mmc_set_ios */
1637  	mmc_set_initial_state(host);
1638  
1639  	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1640  	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1641  		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1642  	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1643  		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1644  	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1645  		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1646  
1647  	/*
1648  	 * This delay should be sufficient to allow the power supply
1649  	 * to reach the minimum voltage.
1650  	 */
1651  	mmc_delay(10);
1652  
1653  	mmc_pwrseq_post_power_on(host);
1654  
1655  	host->ios.clock = host->f_init;
1656  
1657  	host->ios.power_mode = MMC_POWER_ON;
1658  	mmc_set_ios(host);
1659  
1660  	/*
1661  	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1662  	 * time required to reach a stable voltage.
1663  	 */
1664  	mmc_delay(10);
1665  }
1666  
1667  void mmc_power_off(struct mmc_host *host)
1668  {
1669  	if (host->ios.power_mode == MMC_POWER_OFF)
1670  		return;
1671  
1672  	mmc_pwrseq_power_off(host);
1673  
1674  	host->ios.clock = 0;
1675  	host->ios.vdd = 0;
1676  
1677  	host->ios.power_mode = MMC_POWER_OFF;
1678  	/* Set initial state and call mmc_set_ios */
1679  	mmc_set_initial_state(host);
1680  
1681  	/*
1682  	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1683  	 * XO-1.5, require a short delay after poweroff before the card
1684  	 * can be successfully turned on again.
1685  	 */
1686  	mmc_delay(1);
1687  }
1688  
1689  void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1690  {
1691  	mmc_power_off(host);
1692  	/* Wait at least 1 ms according to SD spec */
1693  	mmc_delay(1);
1694  	mmc_power_up(host, ocr);
1695  }
1696  
1697  /*
1698   * Cleanup when the last reference to the bus operator is dropped.
1699   */
1700  static void __mmc_release_bus(struct mmc_host *host)
1701  {
1702  	WARN_ON(!host->bus_dead);
1703  
1704  	host->bus_ops = NULL;
1705  }
1706  
1707  /*
1708   * Increase reference count of bus operator
1709   */
1710  static inline void mmc_bus_get(struct mmc_host *host)
1711  {
1712  	unsigned long flags;
1713  
1714  	spin_lock_irqsave(&host->lock, flags);
1715  	host->bus_refs++;
1716  	spin_unlock_irqrestore(&host->lock, flags);
1717  }
1718  
1719  /*
1720   * Decrease reference count of bus operator and free it if
1721   * it is the last reference.
1722   */
1723  static inline void mmc_bus_put(struct mmc_host *host)
1724  {
1725  	unsigned long flags;
1726  
1727  	spin_lock_irqsave(&host->lock, flags);
1728  	host->bus_refs--;
1729  	if ((host->bus_refs == 0) && host->bus_ops)
1730  		__mmc_release_bus(host);
1731  	spin_unlock_irqrestore(&host->lock, flags);
1732  }
1733  
1734  /*
1735   * Assign a mmc bus handler to a host. Only one bus handler may control a
1736   * host at any given time.
1737   */
1738  void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1739  {
1740  	unsigned long flags;
1741  
1742  	WARN_ON(!host->claimed);
1743  
1744  	spin_lock_irqsave(&host->lock, flags);
1745  
1746  	WARN_ON(host->bus_ops);
1747  	WARN_ON(host->bus_refs);
1748  
1749  	host->bus_ops = ops;
1750  	host->bus_refs = 1;
1751  	host->bus_dead = 0;
1752  
1753  	spin_unlock_irqrestore(&host->lock, flags);
1754  }
1755  
1756  /*
1757   * Remove the current bus handler from a host.
1758   */
1759  void mmc_detach_bus(struct mmc_host *host)
1760  {
1761  	unsigned long flags;
1762  
1763  	WARN_ON(!host->claimed);
1764  	WARN_ON(!host->bus_ops);
1765  
1766  	spin_lock_irqsave(&host->lock, flags);
1767  
1768  	host->bus_dead = 1;
1769  
1770  	spin_unlock_irqrestore(&host->lock, flags);
1771  
1772  	mmc_bus_put(host);
1773  }
1774  
1775  static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1776  				bool cd_irq)
1777  {
1778  	/*
1779  	 * If the device is configured as a wakeup source, prevent a new sleep
1780  	 * for 5 s to give user space time to consume the event.
1781  	 */
1782  	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1783  		device_can_wakeup(mmc_dev(host)))
1784  		pm_wakeup_event(mmc_dev(host), 5000);
1785  
1786  	host->detect_change = 1;
1787  	mmc_schedule_delayed_work(&host->detect, delay);
1788  }
1789  
1790  /**
1791   *	mmc_detect_change - process change of state on a MMC socket
1792   *	@host: host which changed state.
1793   *	@delay: optional delay to wait before detection (jiffies)
1794   *
1795   *	MMC drivers should call this when they detect a card has been
1796   *	inserted or removed. The MMC layer will confirm that any
1797   *	present card is still functional, and initialize any newly
1798   *	inserted.
1799   */
1800  void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1801  {
1802  	_mmc_detect_change(host, delay, true);
1803  }
1804  EXPORT_SYMBOL(mmc_detect_change);
1805  
1806  void mmc_init_erase(struct mmc_card *card)
1807  {
1808  	unsigned int sz;
1809  
1810  	if (is_power_of_2(card->erase_size))
1811  		card->erase_shift = ffs(card->erase_size) - 1;
1812  	else
1813  		card->erase_shift = 0;
1814  
1815  	/*
1816  	 * It is possible to erase an arbitrarily large area of an SD or MMC
1817  	 * card.  That is not desirable because it can take a long time
1818  	 * (minutes), potentially delaying more important I/O, and also the
1819  	 * timeout calculations become increasingly over-estimated.
1820  	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1821  	 * to that size and alignment.
1822  	 *
1823  	 * For SD cards that define Allocation Unit size, limit erases to one
1824  	 * Allocation Unit at a time.
1825  	 * For MMC, have a stab at a good value and for modern cards it will
1826  	 * end up being 4 MiB. Note that if the value is too small, it can end
1827  	 * up taking longer to erase. Also note, erase_size is already set to
1828  	 * High Capacity Erase Size if available when this function is called.
1829  	 */
1830  	if (mmc_card_sd(card) && card->ssr.au) {
1831  		card->pref_erase = card->ssr.au;
1832  		card->erase_shift = ffs(card->ssr.au) - 1;
1833  	} else if (card->erase_size) {
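		/*
		 * sz is the card capacity in MiB: csd.capacity is measured in
		 * read_blkbits-sized blocks, shifting by (read_blkbits - 9)
		 * converts that to 512-byte sectors, and >> 11 divides by the
		 * 2048 sectors in a MiB.  pref_erase is then chosen as 512 KiB,
		 * 1 MiB, 2 MiB or 4 MiB worth of sectors based on capacity.
		 */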
1834  		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1835  		if (sz < 128)
1836  			card->pref_erase = 512 * 1024 / 512;
1837  		else if (sz < 512)
1838  			card->pref_erase = 1024 * 1024 / 512;
1839  		else if (sz < 1024)
1840  			card->pref_erase = 2 * 1024 * 1024 / 512;
1841  		else
1842  			card->pref_erase = 4 * 1024 * 1024 / 512;
1843  		if (card->pref_erase < card->erase_size)
1844  			card->pref_erase = card->erase_size;
1845  		else {
1846  			sz = card->pref_erase % card->erase_size;
1847  			if (sz)
1848  				card->pref_erase += card->erase_size - sz;
1849  		}
1850  	} else
1851  		card->pref_erase = 0;
1852  }
1853  
1854  static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1855  				          unsigned int arg, unsigned int qty)
1856  {
1857  	unsigned int erase_timeout;
1858  
1859  	if (arg == MMC_DISCARD_ARG ||
1860  	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1861  		erase_timeout = card->ext_csd.trim_timeout;
1862  	} else if (card->ext_csd.erase_group_def & 1) {
1863  		/* High Capacity Erase Group Size uses HC timeouts */
1864  		if (arg == MMC_TRIM_ARG)
1865  			erase_timeout = card->ext_csd.trim_timeout;
1866  		else
1867  			erase_timeout = card->ext_csd.hc_erase_timeout;
1868  	} else {
1869  		/* CSD Erase Group Size uses write timeout */
1870  		unsigned int mult = (10 << card->csd.r2w_factor);
1871  		unsigned int timeout_clks = card->csd.taac_clks * mult;
1872  		unsigned int timeout_us;
1873  
1874  		/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
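		/*
		 * In that example the direct product 80000000 * 1280 =
		 * 102,400,000,000 does not fit in 32 bits (UINT_MAX is about
		 * 4.29e9), so taac_ns is divided by 1000 first instead, at a
		 * small cost in precision.
		 */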
1875  		if (card->csd.taac_ns < 1000000)
1876  			timeout_us = (card->csd.taac_ns * mult) / 1000;
1877  		else
1878  			timeout_us = (card->csd.taac_ns / 1000) * mult;
1879  
1880  		/*
1881  		 * ios.clock is only a target.  The real clock rate might be
1882  		 * less but not that much less, so fudge it by multiplying by 2.
1883  		 */
1884  		timeout_clks <<= 1;
1885  		timeout_us += (timeout_clks * 1000) /
1886  			      (card->host->ios.clock / 1000);
1887  
1888  		erase_timeout = timeout_us / 1000;
1889  
1890  		/*
1891  		 * Theoretically, the calculation could underflow so round up
1892  		 * to 1ms in that case.
1893  		 */
1894  		if (!erase_timeout)
1895  			erase_timeout = 1;
1896  	}
1897  
1898  	/* Multiplier for secure operations */
1899  	if (arg & MMC_SECURE_ARGS) {
1900  		if (arg == MMC_SECURE_ERASE_ARG)
1901  			erase_timeout *= card->ext_csd.sec_erase_mult;
1902  		else
1903  			erase_timeout *= card->ext_csd.sec_trim_mult;
1904  	}
1905  
1906  	erase_timeout *= qty;
1907  
1908  	/*
1909  	 * Ensure at least a 1 second timeout for SPI as per
1910  	 * 'mmc_set_data_timeout()'
1911  	 */
1912  	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1913  		erase_timeout = 1000;
1914  
1915  	return erase_timeout;
1916  }
1917  
1918  static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1919  					 unsigned int arg,
1920  					 unsigned int qty)
1921  {
1922  	unsigned int erase_timeout;
1923  
1924  	if (card->ssr.erase_timeout) {
1925  		/* Erase timeout specified in SD Status Register (SSR) */
1926  		erase_timeout = card->ssr.erase_timeout * qty +
1927  				card->ssr.erase_offset;
1928  	} else {
1929  		/*
1930  		 * Erase timeout not specified in SD Status Register (SSR) so
1931  		 * use 250ms per write block.
1932  		 */
1933  		erase_timeout = 250 * qty;
1934  	}
1935  
1936  	/* Must not be less than 1 second */
1937  	if (erase_timeout < 1000)
1938  		erase_timeout = 1000;
1939  
1940  	return erase_timeout;
1941  }
1942  
1943  static unsigned int mmc_erase_timeout(struct mmc_card *card,
1944  				      unsigned int arg,
1945  				      unsigned int qty)
1946  {
1947  	if (mmc_card_sd(card))
1948  		return mmc_sd_erase_timeout(card, arg, qty);
1949  	else
1950  		return mmc_mmc_erase_timeout(card, arg, qty);
1951  }
1952  
1953  static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1954  			unsigned int to, unsigned int arg)
1955  {
1956  	struct mmc_command cmd = {};
1957  	unsigned int qty = 0, busy_timeout = 0;
1958  	bool use_r1b_resp = false;
1959  	unsigned long timeout;
1960  	int err;
1961  
1962  	mmc_retune_hold(card->host);
1963  
1964  	/*
1965  	 * qty is used to calculate the erase timeout which depends on how many
1966  	 * erase groups (or allocation units in SD terminology) are affected.
1967  	 * We count erasing part of an erase group as one erase group.
1968  	 * For SD, the allocation units are always a power of 2.  For MMC, the
1969  	 * erase group size is almost certainly also a power of 2, but the JEDEC
1970  	 * standard does not seem to insist on that, so we fall back to
1971  	 * division in that case.  SD may not specify an allocation unit size,
1972  	 * in which case the timeout is based on the number of write blocks.
1973  	 *
1974  	 * Note that the timeout for secure trim 2 will only be correct if the
1975  	 * number of erase groups specified is the same as the total of all
1976  	 * preceding secure trim 1 commands.  Since the power may have been
1977  	 * lost since the secure trim 1 commands occurred, it is generally
1978  	 * impossible to calculate the secure trim 2 timeout correctly.
1979  	 */
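	/*
	 * Illustrative example: with erase_size = 1024 sectors (so
	 * erase_shift = 10), erasing from = 0 to = 2047 touches erase groups
	 * 0 and 1, giving qty = (2047 >> 10) - (0 >> 10) + 1 = 2.
	 */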
1980  	if (card->erase_shift)
1981  		qty += ((to >> card->erase_shift) -
1982  			(from >> card->erase_shift)) + 1;
1983  	else if (mmc_card_sd(card))
1984  		qty += to - from + 1;
1985  	else
1986  		qty += ((to / card->erase_size) -
1987  			(from / card->erase_size)) + 1;
1988  
1989  	if (!mmc_card_blockaddr(card)) {
1990  		from <<= 9;
1991  		to <<= 9;
1992  	}
1993  
1994  	if (mmc_card_sd(card))
1995  		cmd.opcode = SD_ERASE_WR_BLK_START;
1996  	else
1997  		cmd.opcode = MMC_ERASE_GROUP_START;
1998  	cmd.arg = from;
1999  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2000  	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2001  	if (err) {
2002  		pr_err("mmc_erase: group start error %d, status %#x\n",
2003  		       err, cmd.resp[0]);
2004  		err = -EIO;
2005  		goto out;
2006  	}
2007  
2008  	memset(&cmd, 0, sizeof(struct mmc_command));
2009  	if (mmc_card_sd(card))
2010  		cmd.opcode = SD_ERASE_WR_BLK_END;
2011  	else
2012  		cmd.opcode = MMC_ERASE_GROUP_END;
2013  	cmd.arg = to;
2014  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2015  	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2016  	if (err) {
2017  		pr_err("mmc_erase: group end error %d, status %#x\n",
2018  		       err, cmd.resp[0]);
2019  		err = -EIO;
2020  		goto out;
2021  	}
2022  
2023  	memset(&cmd, 0, sizeof(struct mmc_command));
2024  	cmd.opcode = MMC_ERASE;
2025  	cmd.arg = arg;
2026  	busy_timeout = mmc_erase_timeout(card, arg, qty);
2027  	/*
2028  	 * If the host controller supports busy signalling and the timeout for
2029  	 * the erase operation does not exceed the max_busy_timeout, we should
2030  	 * use an R1B response. Otherwise we need to prevent the host from doing
2031  	 * HW busy detection, which is done by converting to an R1 response instead.
2032  	 */
2033  	if (card->host->max_busy_timeout &&
2034  	    busy_timeout > card->host->max_busy_timeout) {
2035  		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2036  	} else {
2037  		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2038  		cmd.busy_timeout = busy_timeout;
2039  		use_r1b_resp = true;
2040  	}
2041  
2042  	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2043  	if (err) {
2044  		pr_err("mmc_erase: erase error %d, status %#x\n",
2045  		       err, cmd.resp[0]);
2046  		err = -EIO;
2047  		goto out;
2048  	}
2049  
2050  	if (mmc_host_is_spi(card->host))
2051  		goto out;
2052  
2053  	/*
2054  	 * In case R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
2055  	 * shall be avoided.
2056  	 */
2057  	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2058  		goto out;
2059  
2060  	timeout = jiffies + msecs_to_jiffies(busy_timeout);
2061  	do {
2062  		memset(&cmd, 0, sizeof(struct mmc_command));
2063  		cmd.opcode = MMC_SEND_STATUS;
2064  		cmd.arg = card->rca << 16;
2065  		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2066  		/* Do not retry else we can't see errors */
2067  		err = mmc_wait_for_cmd(card->host, &cmd, 0);
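		/*
		 * The 0xFDF92000 mask appears to cover the error bits of the
		 * R1 card status; any of them being set means the erase
		 * failed.
		 */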
2068  		if (err || (cmd.resp[0] & 0xFDF92000)) {
2069  			pr_err("error %d requesting status %#x\n",
2070  				err, cmd.resp[0]);
2071  			err = -EIO;
2072  			goto out;
2073  		}
2074  
2075  		/* Timeout if the device never becomes ready for data and
2076  		 * never leaves the program state.
2077  		 */
2078  		if (time_after(jiffies, timeout)) {
2079  			pr_err("%s: Card stuck in programming state! %s\n",
2080  				mmc_hostname(card->host), __func__);
2081  			err =  -EIO;
2082  			goto out;
2083  		}
2084  
2085  	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2086  		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2087  out:
2088  	mmc_retune_release(card->host);
2089  	return err;
2090  }
2091  
2092  static unsigned int mmc_align_erase_size(struct mmc_card *card,
2093  					 unsigned int *from,
2094  					 unsigned int *to,
2095  					 unsigned int nr)
2096  {
2097  	unsigned int from_new = *from, nr_new = nr, rem;
2098  
2099  	/*
2100  	 * When 'card->erase_size' is a power of 2, we can use round_up/down()
2101  	 * to align the erase size efficiently.
2102  	 */
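	/*
	 * Illustrative example: with erase_size = 1024, *from = 1000 and
	 * nr = 3000, from_new is rounded up to 1024 (rem = 24), nr_new
	 * becomes 3000 - 24 = 2976 and is rounded down to 2048, so the
	 * function returns 2048 with *from = 1024 and *to = 3072.
	 */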
2103  	if (is_power_of_2(card->erase_size)) {
2104  		unsigned int temp = from_new;
2105  
2106  		from_new = round_up(temp, card->erase_size);
2107  		rem = from_new - temp;
2108  
2109  		if (nr_new > rem)
2110  			nr_new -= rem;
2111  		else
2112  			return 0;
2113  
2114  		nr_new = round_down(nr_new, card->erase_size);
2115  	} else {
2116  		rem = from_new % card->erase_size;
2117  		if (rem) {
2118  			rem = card->erase_size - rem;
2119  			from_new += rem;
2120  			if (nr_new > rem)
2121  				nr_new -= rem;
2122  			else
2123  				return 0;
2124  		}
2125  
2126  		rem = nr_new % card->erase_size;
2127  		if (rem)
2128  			nr_new -= rem;
2129  	}
2130  
2131  	if (nr_new == 0)
2132  		return 0;
2133  
2134  	*to = from_new + nr_new;
2135  	*from = from_new;
2136  
2137  	return nr_new;
2138  }
2139  
2140  /**
2141   * mmc_erase - erase sectors.
2142   * @card: card to erase
2143   * @from: first sector to erase
2144   * @nr: number of sectors to erase
2145   * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2146   *
2147   * Caller must claim host before calling this function.
2148   */
2149  int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2150  	      unsigned int arg)
2151  {
2152  	unsigned int rem, to = from + nr;
2153  	int err;
2154  
2155  	if (!(card->host->caps & MMC_CAP_ERASE) ||
2156  	    !(card->csd.cmdclass & CCC_ERASE))
2157  		return -EOPNOTSUPP;
2158  
2159  	if (!card->erase_size)
2160  		return -EOPNOTSUPP;
2161  
2162  	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2163  		return -EOPNOTSUPP;
2164  
2165  	if ((arg & MMC_SECURE_ARGS) &&
2166  	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2167  		return -EOPNOTSUPP;
2168  
2169  	if ((arg & MMC_TRIM_ARGS) &&
2170  	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2171  		return -EOPNOTSUPP;
2172  
2173  	if (arg == MMC_SECURE_ERASE_ARG) {
2174  		if (from % card->erase_size || nr % card->erase_size)
2175  			return -EINVAL;
2176  	}
2177  
2178  	if (arg == MMC_ERASE_ARG)
2179  		nr = mmc_align_erase_size(card, &from, &to, nr);
2180  
2181  	if (nr == 0)
2182  		return 0;
2183  
2184  	if (to <= from)
2185  		return -EINVAL;
2186  
2187  	/* 'from' and 'to' are inclusive */
2188  	to -= 1;
2189  
2190  	/*
2191  	 * Special case where only one erase-group fits in the timeout budget:
2192  	 * If the region crosses an erase-group boundary in this particular
2193  	 * case, we will be trimming more than one erase-group, which does not
2194  	 * fit in the timeout budget of the controller, so we need to split it
2195  	 * and call mmc_do_erase() twice if necessary. This special case is
2196  	 * identified by the card->eg_boundary flag.
2197  	 */
2198  	rem = card->erase_size - (from % card->erase_size);
2199  	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2200  		err = mmc_do_erase(card, from, from + rem - 1, arg);
2201  		from += rem;
2202  		if ((err) || (to <= from))
2203  			return err;
2204  	}
2205  
2206  	return mmc_do_erase(card, from, to, arg);
2207  }
2208  EXPORT_SYMBOL(mmc_erase);
2209  
2210  int mmc_can_erase(struct mmc_card *card)
2211  {
2212  	if ((card->host->caps & MMC_CAP_ERASE) &&
2213  	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2214  		return 1;
2215  	return 0;
2216  }
2217  EXPORT_SYMBOL(mmc_can_erase);
2218  
2219  int mmc_can_trim(struct mmc_card *card)
2220  {
2221  	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2222  	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2223  		return 1;
2224  	return 0;
2225  }
2226  EXPORT_SYMBOL(mmc_can_trim);
2227  
2228  int mmc_can_discard(struct mmc_card *card)
2229  {
2230  	/*
2231  	 * As there's no way to detect the discard support bit at v4.5,
2232  	 * use the s/w feature support field.
2233  	 */
2234  	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2235  		return 1;
2236  	return 0;
2237  }
2238  EXPORT_SYMBOL(mmc_can_discard);
2239  
2240  int mmc_can_sanitize(struct mmc_card *card)
2241  {
2242  	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2243  		return 0;
2244  	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2245  		return 1;
2246  	return 0;
2247  }
2248  EXPORT_SYMBOL(mmc_can_sanitize);
2249  
2250  int mmc_can_secure_erase_trim(struct mmc_card *card)
2251  {
2252  	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2253  	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2254  		return 1;
2255  	return 0;
2256  }
2257  EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2258  
2259  int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2260  			    unsigned int nr)
2261  {
2262  	if (!card->erase_size)
2263  		return 0;
2264  	if (from % card->erase_size || nr % card->erase_size)
2265  		return 0;
2266  	return 1;
2267  }
2268  EXPORT_SYMBOL(mmc_erase_group_aligned);
2269  
2270  static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2271  					    unsigned int arg)
2272  {
2273  	struct mmc_host *host = card->host;
2274  	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2275  	unsigned int last_timeout = 0;
2276  	unsigned int max_busy_timeout = host->max_busy_timeout ?
2277  			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2278  
2279  	if (card->erase_shift) {
2280  		max_qty = UINT_MAX >> card->erase_shift;
2281  		min_qty = card->pref_erase >> card->erase_shift;
2282  	} else if (mmc_card_sd(card)) {
2283  		max_qty = UINT_MAX;
2284  		min_qty = card->pref_erase;
2285  	} else {
2286  		max_qty = UINT_MAX / card->erase_size;
2287  		min_qty = card->pref_erase / card->erase_size;
2288  	}
2289  
2290  	/*
2291  	 * We should not use 'host->max_busy_timeout' as the only limit when
2292  	 * deciding the max discard sectors. We should pick a balanced value
2293  	 * that improves the erase speed without letting the timeout grow too
2294  	 * long.
2295  	 *
2296  	 * Here we use 'card->pref_erase' as the minimum number of discard
2297  	 * sectors no matter what size 'host->max_busy_timeout' is, but if
2298  	 * 'host->max_busy_timeout' is large enough for more discard sectors,
2299  	 * then we keep increasing the max discard sectors until we reach a
2300  	 * balanced value. When 'host->max_busy_timeout' isn't specified, use
2301  	 * the default max erase timeout.
2302  	 */
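	/*
	 * The loop below does a doubling probe: each pass finds the largest
	 * power-of-2 step x for which the erase timeout of qty + x groups
	 * still fits in max_busy_timeout (the limit is only enforced once
	 * qty + x exceeds min_qty), adds that step to qty and repeats until
	 * no further step fits.  A pass also stops early if the computed
	 * timeout stops increasing, which indicates overflow.
	 */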
2303  	do {
2304  		y = 0;
2305  		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2306  			timeout = mmc_erase_timeout(card, arg, qty + x);
2307  
2308  			if (qty + x > min_qty && timeout > max_busy_timeout)
2309  				break;
2310  
2311  			if (timeout < last_timeout)
2312  				break;
2313  			last_timeout = timeout;
2314  			y = x;
2315  		}
2316  		qty += y;
2317  	} while (y);
2318  
2319  	if (!qty)
2320  		return 0;
2321  
2322  	/*
2323  	 * When specifying a sector range to trim, chances are we might cross
2324  	 * an erase-group boundary even if the amount of sectors is less than
2325  	 * an erase-group boundary even if the number of sectors is less than
2326  	 * If we can only fit one erase-group in the controller timeout budget,
2327  	 * we have to care that erase-group boundaries are not crossed by a
2328  	 * single trim operation. We flag that special case with "eg_boundary".
2329  	 * In all other cases we can just decrement qty and pretend that we
2330  	 * always touch (qty + 1) erase-groups as a simple optimization.
2331  	 */
2332  	if (qty == 1)
2333  		card->eg_boundary = 1;
2334  	else
2335  		qty--;
2336  
2337  	/* Convert qty to sectors */
2338  	if (card->erase_shift)
2339  		max_discard = qty << card->erase_shift;
2340  	else if (mmc_card_sd(card))
2341  		max_discard = qty + 1;
2342  	else
2343  		max_discard = qty * card->erase_size;
2344  
2345  	return max_discard;
2346  }
2347  
2348  unsigned int mmc_calc_max_discard(struct mmc_card *card)
2349  {
2350  	struct mmc_host *host = card->host;
2351  	unsigned int max_discard, max_trim;
2352  
2353  	/*
2354  	 * Without erase_group_def set, MMC erase timeout depends on clock
2355  	 * frequency, which can change.  In that case, the best choice is
2356  	 * just the preferred erase size.
2357  	 */
2358  	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2359  		return card->pref_erase;
2360  
2361  	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2362  	if (mmc_can_trim(card)) {
2363  		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2364  		if (max_trim < max_discard)
2365  			max_discard = max_trim;
2366  	} else if (max_discard < card->erase_size) {
2367  		max_discard = 0;
2368  	}
2369  	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2370  		mmc_hostname(host), max_discard, host->max_busy_timeout ?
2371  		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2372  	return max_discard;
2373  }
2374  EXPORT_SYMBOL(mmc_calc_max_discard);
2375  
2376  bool mmc_card_is_blockaddr(struct mmc_card *card)
2377  {
2378  	return card ? mmc_card_blockaddr(card) : false;
2379  }
2380  EXPORT_SYMBOL(mmc_card_is_blockaddr);
2381  
2382  int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2383  {
2384  	struct mmc_command cmd = {};
2385  
2386  	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2387  	    mmc_card_hs400(card) || mmc_card_hs400es(card))
2388  		return 0;
2389  
2390  	cmd.opcode = MMC_SET_BLOCKLEN;
2391  	cmd.arg = blocklen;
2392  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2393  	return mmc_wait_for_cmd(card->host, &cmd, 5);
2394  }
2395  EXPORT_SYMBOL(mmc_set_blocklen);
2396  
2397  int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2398  			bool is_rel_write)
2399  {
2400  	struct mmc_command cmd = {};
2401  
2402  	cmd.opcode = MMC_SET_BLOCK_COUNT;
2403  	cmd.arg = blockcount & 0x0000FFFF;
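	/* Bit 31 of the CMD23 argument is the reliable write request flag. */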
2404  	if (is_rel_write)
2405  		cmd.arg |= 1 << 31;
2406  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2407  	return mmc_wait_for_cmd(card->host, &cmd, 5);
2408  }
2409  EXPORT_SYMBOL(mmc_set_blockcount);
2410  
2411  static void mmc_hw_reset_for_init(struct mmc_host *host)
2412  {
2413  	mmc_pwrseq_reset(host);
2414  
2415  	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2416  		return;
2417  	host->ops->hw_reset(host);
2418  }
2419  
2420  int mmc_hw_reset(struct mmc_host *host)
2421  {
2422  	int ret;
2423  
2424  	if (!host->card)
2425  		return -EINVAL;
2426  
2427  	mmc_bus_get(host);
2428  	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2429  		mmc_bus_put(host);
2430  		return -EOPNOTSUPP;
2431  	}
2432  
2433  	ret = host->bus_ops->reset(host);
2434  	mmc_bus_put(host);
2435  
2436  	if (ret)
2437  		pr_warn("%s: tried to reset card, got error %d\n",
2438  			mmc_hostname(host), ret);
2439  
2440  	return ret;
2441  }
2442  EXPORT_SYMBOL(mmc_hw_reset);
2443  
2444  static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2445  {
2446  	host->f_init = freq;
2447  
2448  	pr_debug("%s: %s: trying to init card at %u Hz\n",
2449  		mmc_hostname(host), __func__, host->f_init);
2450  
2451  	mmc_power_up(host, host->ocr_avail);
2452  
2453  	/*
2454  	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2455  	 * do a hardware reset if possible.
2456  	 */
2457  	mmc_hw_reset_for_init(host);
2458  
2459  	/*
2460  	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2461  	 * if the card is being re-initialized, just send it.  CMD52
2462  	 * should be ignored by SD/eMMC cards.
2463  	 * Skip it if we already know that we do not support SDIO commands
2464  	 */
2465  	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2466  		sdio_reset(host);
2467  
2468  	mmc_go_idle(host);
2469  
2470  	if (!(host->caps2 & MMC_CAP2_NO_SD))
2471  		mmc_send_if_cond(host, host->ocr_avail);
2472  
2473  	/* Order's important: probe SDIO, then SD, then MMC */
2474  	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2475  		if (!mmc_attach_sdio(host))
2476  			return 0;
2477  
2478  	if (!(host->caps2 & MMC_CAP2_NO_SD))
2479  		if (!mmc_attach_sd(host))
2480  			return 0;
2481  
2482  	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2483  		if (!mmc_attach_mmc(host))
2484  			return 0;
2485  
2486  	mmc_power_off(host);
2487  	return -EIO;
2488  }
2489  
2490  int _mmc_detect_card_removed(struct mmc_host *host)
2491  {
2492  	int ret;
2493  
2494  	if (!host->card || mmc_card_removed(host->card))
2495  		return 1;
2496  
2497  	ret = host->bus_ops->alive(host);
2498  
2499  	/*
2500  	 * Card detect status and alive check may be out of sync if card is
2501  	 * removed slowly, when card detect switch changes while card/slot
2502  	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2503  	 * Addendum, Appendix C: Card Detection Switch"). So reschedule the
2504  	 * detect work 200 ms later for this case.
2505  	 */
2506  	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2507  		mmc_detect_change(host, msecs_to_jiffies(200));
2508  		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2509  	}
2510  
2511  	if (ret) {
2512  		mmc_card_set_removed(host->card);
2513  		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2514  	}
2515  
2516  	return ret;
2517  }
2518  
2519  int mmc_detect_card_removed(struct mmc_host *host)
2520  {
2521  	struct mmc_card *card = host->card;
2522  	int ret;
2523  
2524  	WARN_ON(!host->claimed);
2525  
2526  	if (!card)
2527  		return 1;
2528  
2529  	if (!mmc_card_is_removable(host))
2530  		return 0;
2531  
2532  	ret = mmc_card_removed(card);
2533  	/*
2534  	 * The card will be considered unchanged unless we have been asked to
2535  	 * detect a change or host requires polling to provide card detection.
2536  	 */
2537  	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2538  		return ret;
2539  
2540  	host->detect_change = 0;
2541  	if (!ret) {
2542  		ret = _mmc_detect_card_removed(host);
2543  		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2544  			/*
2545  			 * Schedule a detect work as soon as possible to let a
2546  			 * rescan handle the card removal.
2547  			 */
2548  			cancel_delayed_work(&host->detect);
2549  			_mmc_detect_change(host, 0, false);
2550  		}
2551  	}
2552  
2553  	return ret;
2554  }
2555  EXPORT_SYMBOL(mmc_detect_card_removed);
2556  
2557  void mmc_rescan(struct work_struct *work)
2558  {
2559  	struct mmc_host *host =
2560  		container_of(work, struct mmc_host, detect.work);
2561  	int i;
2562  
2563  	if (host->rescan_disable)
2564  		return;
2565  
2566  	/* If there is a non-removable card registered, only scan once */
2567  	if (!mmc_card_is_removable(host) && host->rescan_entered)
2568  		return;
2569  	host->rescan_entered = 1;
2570  
2571  	if (host->trigger_card_event && host->ops->card_event) {
2572  		mmc_claim_host(host);
2573  		host->ops->card_event(host);
2574  		mmc_release_host(host);
2575  		host->trigger_card_event = false;
2576  	}
2577  
2578  	mmc_bus_get(host);
2579  
2580  	/*
2581  	 * if there is a _removable_ card registered, check whether it is
2582  	 * still present
2583  	 */
2584  	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2585  		host->bus_ops->detect(host);
2586  
2587  	host->detect_change = 0;
2588  
2589  	/*
2590  	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2591  	 * the card is no longer present.
2592  	 */
2593  	mmc_bus_put(host);
2594  	mmc_bus_get(host);
2595  
2596  	/* if there still is a card present, stop here */
2597  	if (host->bus_ops != NULL) {
2598  		mmc_bus_put(host);
2599  		goto out;
2600  	}
2601  
2602  	/*
2603  	 * Only we can add a new handler, so it's safe to
2604  	 * release the lock here.
2605  	 */
2606  	mmc_bus_put(host);
2607  
2608  	mmc_claim_host(host);
2609  	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2610  			host->ops->get_cd(host) == 0) {
2611  		mmc_power_off(host);
2612  		mmc_release_host(host);
2613  		goto out;
2614  	}
2615  
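	/*
	 * Walk the init frequencies in freqs[] from 400 kHz down to 100 kHz,
	 * clamped to the host's minimum, and stop at the first frequency the
	 * card successfully initializes at.
	 */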
2616  	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2617  		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2618  			break;
2619  		if (freqs[i] <= host->f_min)
2620  			break;
2621  	}
2622  	mmc_release_host(host);
2623  
2624   out:
2625  	if (host->caps & MMC_CAP_NEEDS_POLL)
2626  		mmc_schedule_delayed_work(&host->detect, HZ);
2627  }
2628  
2629  void mmc_start_host(struct mmc_host *host)
2630  {
2631  	host->f_init = max(freqs[0], host->f_min);
2632  	host->rescan_disable = 0;
2633  	host->ios.power_mode = MMC_POWER_UNDEFINED;
2634  
2635  	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2636  		mmc_claim_host(host);
2637  		mmc_power_up(host, host->ocr_avail);
2638  		mmc_release_host(host);
2639  	}
2640  
2641  	mmc_gpiod_request_cd_irq(host);
2642  	_mmc_detect_change(host, 0, false);
2643  }
2644  
2645  void mmc_stop_host(struct mmc_host *host)
2646  {
2647  	if (host->slot.cd_irq >= 0) {
2648  		if (host->slot.cd_wake_enabled)
2649  			disable_irq_wake(host->slot.cd_irq);
2650  		disable_irq(host->slot.cd_irq);
2651  	}
2652  
2653  	host->rescan_disable = 1;
2654  	cancel_delayed_work_sync(&host->detect);
2655  
2656  	/* clear pm flags now and let card drivers set them as needed */
2657  	host->pm_flags = 0;
2658  
2659  	mmc_bus_get(host);
2660  	if (host->bus_ops && !host->bus_dead) {
2661  		/* Calling bus_ops->remove() with a claimed host can deadlock */
2662  		host->bus_ops->remove(host);
2663  		mmc_claim_host(host);
2664  		mmc_detach_bus(host);
2665  		mmc_power_off(host);
2666  		mmc_release_host(host);
2667  		mmc_bus_put(host);
2668  		return;
2669  	}
2670  	mmc_bus_put(host);
2671  
2672  	mmc_claim_host(host);
2673  	mmc_power_off(host);
2674  	mmc_release_host(host);
2675  }
2676  
2677  int mmc_power_save_host(struct mmc_host *host)
2678  {
2679  	int ret = 0;
2680  
2681  	pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__);
2682  
2683  	mmc_bus_get(host);
2684  
2685  	if (!host->bus_ops || host->bus_dead) {
2686  		mmc_bus_put(host);
2687  		return -EINVAL;
2688  	}
2689  
2690  	if (host->bus_ops->power_save)
2691  		ret = host->bus_ops->power_save(host);
2692  
2693  	mmc_bus_put(host);
2694  
2695  	mmc_power_off(host);
2696  
2697  	return ret;
2698  }
2699  EXPORT_SYMBOL(mmc_power_save_host);
2700  
2701  int mmc_power_restore_host(struct mmc_host *host)
2702  {
2703  	int ret;
2704  
2705  	pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__);
2706  
2707  	mmc_bus_get(host);
2708  
2709  	if (!host->bus_ops || host->bus_dead) {
2710  		mmc_bus_put(host);
2711  		return -EINVAL;
2712  	}
2713  
2714  	mmc_power_up(host, host->card->ocr);
2715  	ret = host->bus_ops->power_restore(host);
2716  
2717  	mmc_bus_put(host);
2718  
2719  	return ret;
2720  }
2721  EXPORT_SYMBOL(mmc_power_restore_host);
2722  
2723  #ifdef CONFIG_PM_SLEEP
2724  /* Do the card removal on suspend if the card is assumed removable.
2725   * Do that in a PM notifier while userspace isn't yet frozen, so we will be
2726   * able to sync the card.
2727   */
2728  static int mmc_pm_notify(struct notifier_block *notify_block,
2729  			unsigned long mode, void *unused)
2730  {
2731  	struct mmc_host *host = container_of(
2732  		notify_block, struct mmc_host, pm_notify);
2733  	unsigned long flags;
2734  	int err = 0;
2735  
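	/*
	 * On the *_PREPARE events: disable rescanning, cancel pending detect
	 * work and, if the bus cannot be kept suspended and the card is
	 * removable, remove it so it can be re-probed on resume.  On the
	 * POST_* events: re-enable rescanning and trigger a new detect.
	 */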
2736  	switch (mode) {
2737  	case PM_HIBERNATION_PREPARE:
2738  	case PM_SUSPEND_PREPARE:
2739  	case PM_RESTORE_PREPARE:
2740  		spin_lock_irqsave(&host->lock, flags);
2741  		host->rescan_disable = 1;
2742  		spin_unlock_irqrestore(&host->lock, flags);
2743  		cancel_delayed_work_sync(&host->detect);
2744  
2745  		if (!host->bus_ops)
2746  			break;
2747  
2748  		/* Validate prerequisites for suspend */
2749  		if (host->bus_ops->pre_suspend)
2750  			err = host->bus_ops->pre_suspend(host);
2751  		if (!err)
2752  			break;
2753  
2754  		if (!mmc_card_is_removable(host)) {
2755  			dev_warn(mmc_dev(host),
2756  				 "pre_suspend failed for non-removable host: %d\n",
2757  				 err);
2758  			/* Avoid removing non-removable hosts */
2759  			break;
2760  		}
2761  
2762  		/* Calling bus_ops->remove() with a claimed host can deadlock */
2763  		host->bus_ops->remove(host);
2764  		mmc_claim_host(host);
2765  		mmc_detach_bus(host);
2766  		mmc_power_off(host);
2767  		mmc_release_host(host);
2768  		host->pm_flags = 0;
2769  		break;
2770  
2771  	case PM_POST_SUSPEND:
2772  	case PM_POST_HIBERNATION:
2773  	case PM_POST_RESTORE:
2774  
2775  		spin_lock_irqsave(&host->lock, flags);
2776  		host->rescan_disable = 0;
2777  		spin_unlock_irqrestore(&host->lock, flags);
2778  		_mmc_detect_change(host, 0, false);
2779  
2780  	}
2781  
2782  	return 0;
2783  }
2784  
2785  void mmc_register_pm_notifier(struct mmc_host *host)
2786  {
2787  	host->pm_notify.notifier_call = mmc_pm_notify;
2788  	register_pm_notifier(&host->pm_notify);
2789  }
2790  
2791  void mmc_unregister_pm_notifier(struct mmc_host *host)
2792  {
2793  	unregister_pm_notifier(&host->pm_notify);
2794  }
2795  #endif
2796  
2797  /**
2798   * mmc_init_context_info() - init synchronization context
2799   * @host: mmc host
2800   *
2801   * Init struct context_info needed to implement asynchronous
2802   * request mechanism, used by mmc core, host driver and mmc requests
2803   * supplier.
2804   */
2805  void mmc_init_context_info(struct mmc_host *host)
2806  {
2807  	host->context_info.is_new_req = false;
2808  	host->context_info.is_done_rcv = false;
2809  	host->context_info.is_waiting_last_req = false;
2810  	init_waitqueue_head(&host->context_info.wait);
2811  }
2812  
2813  #ifdef CONFIG_MMC_EMBEDDED_SDIO
2814  void mmc_set_embedded_sdio_data(struct mmc_host *host,
2815  				struct sdio_cis *cis,
2816  				struct sdio_cccr *cccr,
2817  				struct sdio_embedded_func *funcs,
2818  				int num_funcs)
2819  {
2820  	host->embedded_sdio_data.cis = cis;
2821  	host->embedded_sdio_data.cccr = cccr;
2822  	host->embedded_sdio_data.funcs = funcs;
2823  	host->embedded_sdio_data.num_funcs = num_funcs;
2824  }
2825  
2826  EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
2827  #endif
2828  
2829  static int __init mmc_init(void)
2830  {
2831  	int ret;
2832  
2833  	ret = mmc_register_bus();
2834  	if (ret)
2835  		return ret;
2836  
2837  	ret = mmc_register_host_class();
2838  	if (ret)
2839  		goto unregister_bus;
2840  
2841  	ret = sdio_register_bus();
2842  	if (ret)
2843  		goto unregister_host_class;
2844  
2845  	return 0;
2846  
2847  unregister_host_class:
2848  	mmc_unregister_host_class();
2849  unregister_bus:
2850  	mmc_unregister_bus();
2851  	return ret;
2852  }
2853  
2854  static void __exit mmc_exit(void)
2855  {
2856  	sdio_unregister_bus();
2857  	mmc_unregister_host_class();
2858  	mmc_unregister_bus();
2859  }
2860  
2861  subsys_initcall(mmc_init);
2862  module_exit(mmc_exit);
2863  
2864  MODULE_LICENSE("GPL");
2865