/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_end);

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
#ifdef CONFIG_BLOCK
			if (mrq->lat_hist_enabled) {
				ktime_t completion;
				u_int64_t delta_us;

				completion = ktime_get();
				delta_us = ktime_us_delta(completion,
							  mrq->io_start);
				blk_update_latency_hist(
					(mrq->data->flags & MMC_DATA_READ) ?
					&host->io_lat_read :
					&host->io_lat_write, delta_us);
			}
#endif
			trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
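
/*
 * Illustrative sketch (not part of the original file): how a host
 * controller driver typically hands a finished request back to the core
 * from its interrupt handler. struct example_host, example_read_resp()
 * and example_read_error() are hypothetical names for this example only.
 */
static irqreturn_t example_host_irq(int irq, void *dev_id)
{
	struct example_host *eh = dev_id;
	struct mmc_request *mrq = eh->mrq;

	/* Copy the response registers and any error before completing. */
	mrq->cmd->resp[0] = example_read_resp(eh, 0);
	mrq->cmd->error = example_read_error(eh);

	eh->mrq = NULL;
	mmc_request_done(eh->mmc, mrq);	/* hand the request back to the core */

	return IRQ_HANDLED;
}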

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	BUG_ON(!card);

	if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation is still in progress
	 * when we return.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	struct mmc_context_info *context_info = &mrq->host->context_info;

	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_wait_data_done;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_wait_data_done(mrq);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);

	return 0;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}

/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acknowledges the end
 * of data request execution, or until a new request notification arrives
 * from the block layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			} else {
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				host->ops->request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req) {
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
		}
	}
	return err;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previously started request
 *                     that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
		 bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}

/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request before starting the new one, then return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, or NULL if none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_start_bkops(host->card, true);
	}

	if (!err && areq) {
#ifdef CONFIG_BLOCK
		if (host->latency_hist_enabled) {
			areq->mrq->io_start = ktime_get();
			areq->mrq->lat_hist_enabled = 1;
		} else
			areq->mrq->lat_hist_enabled = 0;
#endif
		trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
				       areq->mrq->cmd->arg,
				       areq->mrq->data);
		start_err = __mmc_start_data_req(host, areq->mrq);
	}

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
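
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * single-block read built on mmc_wait_for_req(). The caller is assumed
 * to have claimed the host; example_read_block() itself is a
 * hypothetical helper for this example only.
 */
static int example_read_block(struct mmc_card *card, u32 blk_addr,
			      struct scatterlist *sg)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	cmd.opcode = MMC_READ_SINGLE_BLOCK;
	cmd.arg = blk_addr;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);	/* see mmc_set_data_timeout() below */

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_wait_for_req(card->host, &mrq);
	return cmd.error ? cmd.error : data.error;
}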

/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, then poll the card status
 *	until it is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
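
/*
 * Illustrative sketch (not part of the original file): issuing CMD13
 * (SEND_STATUS) through mmc_wait_for_cmd() with a few retries. The
 * helper itself is hypothetical; the opcode and flags are the standard
 * ones from <linux/mmc/mmc.h> and <linux/mmc/core.h>.
 */
static int example_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);
	if (!err)
		*status = cmd.resp[0];
	return err;
}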

/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);
	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, HPI could not be issued; the card will
	 * complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
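
/*
 * Illustrative sketch (not part of the original file): padding an odd
 * transfer size before building a single-entry scatterlist. With the
 * current 32-bit alignment rule, 1500 bytes rounds up to 1504.
 */
static unsigned int example_pad_size(struct mmc_card *card)
{
	return mmc_align_data_size(card, 1500);	/* -> 1504 */
}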

/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	dereferences to a non-zero value, then this will return prematurely
 *	with that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);
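
/*
 * Illustrative sketch (not part of the original file): the usual
 * claim/release bracket around a command sequence so it cannot be
 * interleaved with other users of the host. The helper is hypothetical.
 */
static int example_claimed_op(struct mmc_card *card)
{
	u32 status;
	int err;

	mmc_claim_host(card->host);	/* may sleep until the host is free */
	err = mmc_send_status(card, &status);	/* any command sequence */
	mmc_release_host(card->host);

	return err;
}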

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
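
/*
 * Illustrative sketch (not part of the original file): mmc_get_card()
 * and mmc_put_card() bracket an operation while also holding a runtime
 * PM reference on the card device.
 */
static void example_with_card(struct mmc_card *card)
{
	mmc_get_card(card);	/* runtime resume + claim host */
	/* ... issue requests against card->host ... */
	mmc_put_card(card);	/* release host + drop PM reference */
}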

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	mmc_host_clk_hold(host);
	err = host->ops->execute_tuning(host, opcode);
	mmc_host_clk_release(host);

	if (err)
		pr_err("%s: tuning execution failed\n", mmc_hostname(host));

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
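
/*
 * Illustrative sketch (not part of the original file): converting the
 * boundary-case range from the kernel-doc above. [3300:3400] sets the
 * three adjacent OCR bits MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */
static u32 example_ocr_for_3v3(void)
{
	return mmc_vddrange_to_ocrmask(3300, 3400);
}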

#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * 1. Return zero on success.
 * 2. Return negative errno: voltage-range is invalid.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges || !num_ranges) {
		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 0;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
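
/*
 * Illustrative sketch (not part of the original file): parsing a device
 * tree "voltage-ranges" property such as
 *
 *	voltage-ranges = <3300 3400>;
 *
 * from a hypothetical host driver's probe path.
 */
static int example_parse_voltages(struct device_node *np, u32 *ocr_mask)
{
	*ocr_mask = 0;
	return mmc_of_parse_voltage(np, ocr_mask);
}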

#endif /* CONFIG_OF */

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;
	int			vdd_uV;
	int			vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;

	if (vdd_bit) {
		int		tmp;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
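
/*
 * Illustrative sketch (not part of the original file): a host driver's
 * set_ios() method driving its vmmc supply from ios->power_mode, as the
 * kernel-doc above suggests. Error handling and the remaining ios
 * programming are elided.
 */
static void example_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	case MMC_POWER_OFF:
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;
	default:
		break;
	}
	/* ... program clock, bus width and timing here ... */
}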

#endif /* CONFIG_REGULATOR */

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
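
/*
 * Illustrative sketch (not part of the original file): calling
 * mmc_regulator_get_supply() from probe and falling back to a fixed OCR
 * mask when no vmmc regulator is described. The fallback value is an
 * assumption for the example.
 */
static int example_probe_supplies(struct mmc_host *mmc)
{
	int ret = mmc_regulator_get_supply(mmc);

	if (ret)
		return ret;	/* typically -EPROBE_DEFER */

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	return 0;
}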

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	mmc_host_clk_hold(host);
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 5 ms */
	mmc_delay(5);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	mmc_host_clk_release(host);

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	host->ios.vdd = fls(ocr) - 1;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
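
/*
 * Illustrative sketch (not part of the original file): a card-detect
 * interrupt handler in a host driver, debouncing insertion events by
 * passing a delay to mmc_detect_change(). The 200 ms debounce is an
 * assumption for the example.
 */
static irqreturn_t example_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	mmc_detect_change(mmc, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}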
1814 
mmc_init_erase(struct mmc_card * card)1815 void mmc_init_erase(struct mmc_card *card)
1816 {
1817 	unsigned int sz;
1818 
1819 	if (is_power_of_2(card->erase_size))
1820 		card->erase_shift = ffs(card->erase_size) - 1;
1821 	else
1822 		card->erase_shift = 0;
1823 
	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes), potentially delaying more important I/O, and because
	 * the timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define an Allocation Unit size, limit erases to
	 * one Allocation Unit at a time.  For MMC cards that define a High
	 * Capacity Erase Size, whether it is switched on or not, limit to
	 * that size.  Otherwise just pick a reasonable value; for modern
	 * cards it will end up being 4MiB.  Note that if the value is too
	 * small, erasing can end up taking longer.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}

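/*
 * Worked example for mmc_init_erase() (the figures are assumptions, not
 * taken from a real card): an SD card reporting a 4 MiB allocation unit
 * has ssr.au = 8192 sectors, so pref_erase = 8192 and erase_shift =
 * ffs(8192) - 1 = 13, letting later erase-group arithmetic use shifts.
 * A card with no AU and a capacity below 128 MiB (sz < 128) falls back
 * to pref_erase = 1024 sectors (512 KiB), rounded up if necessary to a
 * multiple of erase_size.
 */
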
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

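/*
 * Worked example for the CSD path above (the figures are assumptions):
 * with r2w_factor = 7, mult = 10 << 7 = 1280.  For tacc_ns = 80000000
 * the large-value branch gives timeout_us = (80000000 / 1000) * 1280 =
 * 102400000, i.e. 102.4 s; multiplying first would have overflowed
 * 32 bits, which is exactly what the branch avoids.
 */
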
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	unsigned int fr, nr;
	int err;

	fr = from;
	nr = to - from + 1;
	trace_mmc_blk_erase_start(arg, fr, nr);

	/*
	 * qty is used to calculate the erase timeout, which depends on how
	 * many erase groups (or allocation units in SD terminology) are
	 * affected.  We count erasing part of an erase group as one erase
	 * group.  For SD, the allocation units are always a power of 2.  For
	 * MMC, the erase group size is almost certainly also a power of 2,
	 * but the JEDEC standard does not seem to insist on that, so we fall
	 * back to division in that case.  SD may not specify an allocation
	 * unit size, in which case the timeout is based on the number of
	 * write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/*
		 * Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	trace_mmc_blk_erase_end(arg, fr, nr);
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

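/*
 * Illustrative sketch, excluded from the build: erasing a 1 MiB region.
 * The caller, the sector numbers and the error handling are assumptions
 * of the example; note the claim/release pairing required by the
 * kerneldoc above.
 */
#if 0
static int example_erase_region(struct mmc_card *card)
{
	unsigned int from = 0;		/* first sector */
	unsigned int nr = 2048;		/* 1 MiB in 512-byte sectors */
	int err;

	if (!mmc_can_erase(card))
		return -EOPNOTSUPP;

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
	mmc_release_host(card->host);

	return err;
}
#endif
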
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there is no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field instead.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

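/*
 * Illustrative sketch, excluded from the build: picking the widest
 * supported erase argument with the helpers above.  The preference order
 * shown roughly mirrors what the mmc block driver does, but the function
 * itself is an assumption of the example.
 */
#if 0
static unsigned int example_pick_erase_arg(struct mmc_card *card)
{
	if (mmc_can_discard(card))
		return MMC_DISCARD_ARG;
	if (mmc_can_trim(card))
		return MMC_TRIM_ARG;
	if (mmc_can_erase(card))
		return MMC_ERASE_ARG;
	return 0;	/* no erase support at all */
}
#endif
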
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_busy_timeout)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_busy_timeout)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, the MMC erase timeout depends on the
	 * clock frequency, which can change.  In that case, the best choice
	 * is just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

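/*
 * Illustrative sketch, excluded from the build: feeding the result into
 * the block layer when a request queue is set up, roughly as the mmc
 * block driver does.  The function and its queue argument are
 * assumptions of the example.
 */
#if 0
static void example_setup_discard(struct request_queue *q,
				  struct mmc_card *card)
{
	unsigned int max_discard = mmc_calc_max_discard(card);

	if (max_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
		blk_queue_max_discard_sectors(q, max_discard);
	}
}
#endif
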
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

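/*
 * Illustrative sketch, excluded from the build: legacy byte-addressed
 * cards need CMD16 before 512-byte transfers, while block-addressed and
 * DDR52 cards return 0 immediately, as the function above shows.  The
 * caller is an assumption of the example.
 */
#if 0
static int example_prepare_transfer(struct mmc_card *card)
{
	return mmc_set_blocklen(card, 512);
}
#endif
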
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
			bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);

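/*
 * Illustrative sketch, excluded from the build: issuing CMD23 ahead of a
 * multi-block transfer to request a reliable write.  The wrapper and its
 * block count are assumptions of the example; bit 31 of the CMD23
 * argument is what marks the write as reliable, as the function above
 * shows.
 */
#if 0
static int example_reliable_write_prep(struct mmc_card *card,
				       unsigned int blocks)
{
	return mmc_set_blockcount(card, blocks, true);
}
#endif
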
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}

int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);

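/*
 * Illustrative sketch, excluded from the build: a recovery path that
 * tries a hardware reset after repeated request failures.  The retry
 * policy is an assumption of the example; mmc_hw_reset() returns
 * -EOPNOTSUPP when the host or card cannot be reset, which callers
 * should tolerate.
 */
#if 0
static int example_try_recovery(struct mmc_host *host)
{
	int err;

	mmc_claim_host(host);
	err = mmc_hw_reset(host);
	mmc_release_host(host);

	if (err == -EOPNOTSUPP)
		pr_debug("%s: hw reset not supported\n", mmc_hostname(host));
	return err;
}
#endif
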
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset() sends CMD52 to reset the card.  Since we do not know
	 * whether the card is being re-initialized, just send it anyway.
	 * CMD52 should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (host->caps & MMC_CAP_NONREMOVABLE)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

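/*
 * Illustrative sketch, excluded from the build: bailing out of a request
 * path early when the card is gone.  The wrapper is an assumption of the
 * example; the claim is taken here because the WARN_ON above requires
 * the caller to hold it.
 */
#if 0
static int example_check_card(struct mmc_host *host)
{
	int gone;

	mmc_claim_host(host);
	gone = mmc_detect_card_removed(host);
	mmc_release_host(host);

	return gone ? -ENODEV : 0;
}
#endif
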
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->trigger_card_event && host->ops->card_event) {
		host->ops->card_event(host);
		host->trigger_card_event = false;
	}

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
			host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;
	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
		mmc_power_off(host);
	else
		mmc_power_up(host, host->ocr_avail);
	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	if (host->slot.cd_irq >= 0)
		disable_irq(host->slot.cd_irq);

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

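/*
 * Illustrative sketch, excluded from the build: flushing the card's
 * volatile cache before a power-down transition, as the suspend paths
 * do.  The surrounding callback is an assumption of the example.
 */
#if 0
static int example_suspend_flush(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_flush_cache(card);
	mmc_release_host(card->host);

	return err;
}
#endif
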
#ifdef CONFIG_PM

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
					unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);
	}

	return 0;
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Initialize the struct context_info needed to implement the asynchronous
 * request mechanism used by the mmc core, host drivers and mmc request
 * suppliers.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

#ifdef CONFIG_MMC_EMBEDDED_SDIO
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

#ifdef CONFIG_BLOCK
static ssize_t
latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	size_t written_bytes;

	written_bytes = blk_latency_hist_show("Read", &host->io_lat_read,
			buf, PAGE_SIZE);
	written_bytes += blk_latency_hist_show("Write", &host->io_lat_write,
			buf + written_bytes, PAGE_SIZE - written_bytes);

	return written_bytes;
}

/*
 * Permitted values: 0, 1, 2.
 * 0 -> Disable IO latency histograms (default)
 * 1 -> Enable IO latency histograms
 * 2 -> Zero out IO latency histograms
 */
static ssize_t
latency_hist_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	long value;

	if (kstrtol(buf, 0, &value))
		return -EINVAL;
	if (value == BLK_IO_LAT_HIST_ZERO) {
		memset(&host->io_lat_read, 0, sizeof(host->io_lat_read));
		memset(&host->io_lat_write, 0, sizeof(host->io_lat_write));
	} else if (value == BLK_IO_LAT_HIST_ENABLE ||
		   value == BLK_IO_LAT_HIST_DISABLE) {
		host->latency_hist_enabled = value;
	}
	return count;
}

static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
		   latency_hist_show, latency_hist_store);

void
mmc_latency_hist_sysfs_init(struct mmc_host *host)
{
	if (device_create_file(&host->class_dev, &dev_attr_latency_hist))
		dev_err(&host->class_dev,
			"Failed to create latency_hist sysfs entry\n");
}

void
mmc_latency_hist_sysfs_exit(struct mmc_host *host)
{
	device_remove_file(&host->class_dev, &dev_attr_latency_hist);
}
#endif

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");