// SPDX-License-Identifier: GPL-2.0
/*
 *
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

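/*
 * Work handler used to retry issuing the current request in non-atomic
 * context; scheduled when the host's ->request_atomic() returns -EBUSY.
 */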
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

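/*
 * Dispatch the next queued request to the host controller, unless a
 * request is already in flight or the queue is halted for recovery.
 */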
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq || hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returned -EBUSY, the card may be busy now, so
	 * switch to a non-atomic context and try again for this unusual case,
	 * to avoid time-consuming operations in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

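/*
 * Select the tag of the next request to dispatch: prefer the slot right
 * after the current one, otherwise scan all slots; set HSQ_INVALID_TAG
 * when the queue is empty.
 */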
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, set an
	 * invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check whether the corresponding request
	 * is available; if so, we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}

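/*
 * Finish bookkeeping after a request completes: pick the next tag, wake
 * up anyone waiting for the queue to go idle, and pump the next request
 * unless recovery is in progress.
 */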
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible, after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the completed slot's request to make room for a new request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

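/*
 * Halt the software queue for error recovery: no new requests are pumped
 * or accepted until mmc_hsq_recovery_finish() is called.
 */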
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

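/*
 * Leave recovery mode and restart pumping any requests still pending in
 * the software queue.
 */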
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

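/*
 * Queue a request into the slot indexed by its tag and kick the pump.
 * Returns -ESHUTDOWN if the queue is disabled, or -EBUSY in recovery mode.
 */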
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * If there is no available next tag, use the current request's tag
	 * as the next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

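/* Forward post-processing of a completed request to the host driver, if supported. */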
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

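/*
 * Report whether the software queue is idle: nothing in flight and nothing
 * queued, or the queue halted for recovery (in which case *ret is -EBUSY).
 */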
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

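/* Block until the software queue becomes idle. */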
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

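/*
 * Disable the software queue: wait up to 500ms for it to drain, then mark
 * it disabled so no further requests are accepted.
 */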
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

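/* Enable the software queue; returns -EBUSY if it is already enabled. */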
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

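/*
 * Allocate the slot array and register the software queue as the host's
 * CQE implementation.
 */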
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

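/* Suspend the software queue by disabling it, waiting for it to drain. */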
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

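/* Re-enable the software queue after a suspend. */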
int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");