Lines matching refs: mq
46 struct mmc_queue *mq = d; in mmc_queue_thread() local
47 struct request_queue *q = mq->queue; in mmc_queue_thread()
51 down(&mq->thread_sem); in mmc_queue_thread()
59 mq->req = req; in mmc_queue_thread()
67 up(&mq->thread_sem); in mmc_queue_thread()
69 down(&mq->thread_sem); in mmc_queue_thread()
74 mq->issue_fn(mq, req); in mmc_queue_thread()
76 up(&mq->thread_sem); in mmc_queue_thread()
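
The listing above shows only the mq-touching lines of the dispatch thread. A minimal sketch of how the loop fits together, with the omitted request-fetching and scheduling lines filled in as assumptions about typical 2.6-era block-layer code:

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;				/* line 46 */
	struct request_queue *q = mq->queue;			/* line 47 */

	down(&mq->thread_sem);					/* line 51 */
	do {
		struct request *req = NULL;

		/* assumed: fetch the next request under the queue lock */
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = elv_next_request(q);
		mq->req = req;					/* line 59 */
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/* drop thread_sem while idle so mmc_queue_suspend()
			 * can take it and keep the thread parked */
			up(&mq->thread_sem);			/* line 67 */
			schedule();
			down(&mq->thread_sem);			/* line 69 */
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);				/* line 74 */
	} while (1);
	up(&mq->thread_sem);					/* line 76 */

	return 0;
}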
89 struct mmc_queue *mq = q->queuedata; in mmc_request() local
93 if (!mq) { in mmc_request()
104 if (!mq->req) in mmc_request()
105 wake_up_process(mq->thread); in mmc_request()
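
mmc_request() is the request_fn handed to blk_init_queue() below; it only nudges the dispatch thread. A sketch, with the dead-queue error path (the omitted lines 94-103, which never mention mq) assumed:

static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;			/* line 89 */
	struct request *req;

	if (!mq) {						/* line 93 */
		/* assumed: queue already torn down, fail any stragglers */
		while ((req = elv_next_request(q)) != NULL)
			__blk_end_request(req, -EIO, blk_rq_bytes(req));
		return;
	}

	if (!mq->req)						/* line 104 */
		wake_up_process(mq->thread);			/* line 105 */
}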
116 int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) in mmc_init_queue() argument
125 mq->card = card; in mmc_init_queue()
126 mq->queue = blk_init_queue(mmc_request, lock); in mmc_init_queue()
127 if (!mq->queue) in mmc_init_queue()
130 mq->queue->queuedata = mq; in mmc_init_queue()
131 mq->req = NULL; in mmc_init_queue()
133 blk_queue_prep_rq(mq->queue, mmc_prep_request); in mmc_init_queue()
134 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL); in mmc_init_queue()
135 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); in mmc_init_queue()
151 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); in mmc_init_queue()
152 if (!mq->bounce_buf) { in mmc_init_queue()
159 if (mq->bounce_buf) { in mmc_init_queue()
160 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); in mmc_init_queue()
161 blk_queue_max_sectors(mq->queue, bouncesz / 512); in mmc_init_queue()
162 blk_queue_max_phys_segments(mq->queue, bouncesz / 512); in mmc_init_queue()
163 blk_queue_max_hw_segments(mq->queue, bouncesz / 512); in mmc_init_queue()
164 blk_queue_max_segment_size(mq->queue, bouncesz); in mmc_init_queue()
166 mq->sg = kmalloc(sizeof(struct scatterlist), in mmc_init_queue()
168 if (!mq->sg) { in mmc_init_queue()
172 sg_init_table(mq->sg, 1); in mmc_init_queue()
174 mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * in mmc_init_queue()
176 if (!mq->bounce_sg) { in mmc_init_queue()
180 sg_init_table(mq->bounce_sg, bouncesz / 512); in mmc_init_queue()
185 if (!mq->bounce_buf) { in mmc_init_queue()
186 blk_queue_bounce_limit(mq->queue, limit); in mmc_init_queue()
187 blk_queue_max_sectors(mq->queue, in mmc_init_queue()
189 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); in mmc_init_queue()
190 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); in mmc_init_queue()
191 blk_queue_max_segment_size(mq->queue, host->max_seg_size); in mmc_init_queue()
193 mq->sg = kmalloc(sizeof(struct scatterlist) * in mmc_init_queue()
195 if (!mq->sg) { in mmc_init_queue()
199 sg_init_table(mq->sg, host->max_phys_segs); in mmc_init_queue()
202 init_MUTEX(&mq->thread_sem); in mmc_init_queue()
204 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd"); in mmc_init_queue()
205 if (IS_ERR(mq->thread)) { in mmc_init_queue()
206 ret = PTR_ERR(mq->thread); in mmc_init_queue()
212 if (mq->bounce_sg) in mmc_init_queue()
213 kfree(mq->bounce_sg); in mmc_init_queue()
214 mq->bounce_sg = NULL; in mmc_init_queue()
216 if (mq->sg) in mmc_init_queue()
217 kfree(mq->sg); in mmc_init_queue()
218 mq->sg = NULL; in mmc_init_queue()
219 if (mq->bounce_buf) in mmc_init_queue()
220 kfree(mq->bounce_buf); in mmc_init_queue()
221 mq->bounce_buf = NULL; in mmc_init_queue()
222 blk_cleanup_queue(mq->queue); in mmc_init_queue()
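
mmc_init_queue() configures the block queue in one of two ways depending on whether a bounce buffer is used. A condensed sketch of that decision follows; the guard conditions (CONFIG_MMC_BLOCK_BOUNCE, a single-segment host, the MMC_QUEUE_BOUNCESZ constant and its clamping to host limits) are assumptions about lines the listing omits, and the allocation-failure paths that jump to the cleanup labels (lines 212-222 above) are left out:

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {			/* assumed guard */
		unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;	/* assumed constant */

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);	/* line 151 */
		if (mq->bounce_buf) {
			/* Every request is copied through one contiguous buffer,
			 * so the queue limits follow bouncesz, not the host. */
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			/* one sg entry describing the bounce buffer itself ... */
			mq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
			sg_init_table(mq->sg, 1);

			/* ... plus a table for the request's own pages */
			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
						bouncesz / 512, GFP_KERNEL);
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		/* No bounce buffer: expose the host controller's real limits. */
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
				 host->max_phys_segs, GFP_KERNEL);
		sg_init_table(mq->sg, host->max_phys_segs);
	}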
226 void mmc_cleanup_queue(struct mmc_queue *mq) in mmc_cleanup_queue() argument
228 struct request_queue *q = mq->queue; in mmc_cleanup_queue()
237 mmc_queue_resume(mq); in mmc_cleanup_queue()
240 kthread_stop(mq->thread); in mmc_cleanup_queue()
242 if (mq->bounce_sg) in mmc_cleanup_queue()
243 kfree(mq->bounce_sg); in mmc_cleanup_queue()
244 mq->bounce_sg = NULL; in mmc_cleanup_queue()
246 kfree(mq->sg); in mmc_cleanup_queue()
247 mq->sg = NULL; in mmc_cleanup_queue()
249 if (mq->bounce_buf) in mmc_cleanup_queue()
250 kfree(mq->bounce_buf); in mmc_cleanup_queue()
251 mq->bounce_buf = NULL; in mmc_cleanup_queue()
253 blk_cleanup_queue(mq->queue); in mmc_cleanup_queue()
255 mq->card = NULL; in mmc_cleanup_queue()
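
The cleanup path is ordering-sensitive. A sketch of the whole function; clearing q->queuedata under the queue lock is an assumption about the lines not shown (mmc_request() above checks for a NULL queuedata), and the NULL checks before kfree() from the listing are dropped since kfree(NULL) is a no-op:

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;			/* line 228 */
	unsigned long flags;

	/* assumed: detach the queue so mmc_request() rejects stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* A suspended queue holds thread_sem, so resume first (line 237);
	 * otherwise kthread_stop() would wait forever for a thread stuck
	 * in down() at line 69. */
	mmc_queue_resume(mq);
	kthread_stop(mq->thread);				/* line 240 */

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
	kfree(mq->sg);
	mq->sg = NULL;
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);				/* line 253 */
	mq->card = NULL;					/* line 255 */
}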
267 void mmc_queue_suspend(struct mmc_queue *mq) in mmc_queue_suspend() argument
269 struct request_queue *q = mq->queue; in mmc_queue_suspend()
272 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) { in mmc_queue_suspend()
273 mq->flags |= MMC_QUEUE_SUSPENDED; in mmc_queue_suspend()
279 down(&mq->thread_sem); in mmc_queue_suspend()
287 void mmc_queue_resume(struct mmc_queue *mq) in mmc_queue_resume() argument
289 struct request_queue *q = mq->queue; in mmc_queue_resume()
292 if (mq->flags & MMC_QUEUE_SUSPENDED) { in mmc_queue_resume()
293 mq->flags &= ~MMC_QUEUE_SUSPENDED; in mmc_queue_resume()
295 up(&mq->thread_sem); in mmc_queue_resume()
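
Suspend parks both ends: the block layer stops feeding requests and thread_sem is taken so the dispatch thread cannot run past its idle point; resume reverses the two steps. A sketch, where the blk_stop_queue()/blk_start_queue() calls under the queue lock are assumptions about the lines the listing omits:

void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;			/* line 269 */
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {		/* line 272 */
		mq->flags |= MMC_QUEUE_SUSPENDED;

		/* assumed: stop the block layer from calling mmc_request() */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		/* the idle thread released this at line 67; holding it here
		 * keeps the thread parked */
		down(&mq->thread_sem);				/* line 279 */
	}
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;			/* line 289 */
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {			/* line 292 */
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);				/* line 295 */

		/* assumed: let the block layer call mmc_request() again */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}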
306 unsigned int mmc_queue_map_sg(struct mmc_queue *mq) in mmc_queue_map_sg() argument
313 if (!mq->bounce_buf) in mmc_queue_map_sg()
314 return blk_rq_map_sg(mq->queue, mq->req, mq->sg); in mmc_queue_map_sg()
316 BUG_ON(!mq->bounce_sg); in mmc_queue_map_sg()
318 sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg); in mmc_queue_map_sg()
320 mq->bounce_sg_len = sg_len; in mmc_queue_map_sg()
323 for_each_sg(mq->bounce_sg, sg, sg_len, i) in mmc_queue_map_sg()
326 sg_init_one(mq->sg, mq->bounce_buf, buflen); in mmc_queue_map_sg()
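
mmc_queue_map_sg() hides the bounce buffer from the caller: without one it maps the request straight into mq->sg, with one it maps into mq->bounce_sg and hands back a single-entry mq->sg covering the bounce buffer. A sketch; the buflen accumulation inside the loop is assumed from the omitted lines:

unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen = 0;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)					/* line 313 */
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);					/* line 316 */

	/* Map the real request pages into the bounce sg table ... */
	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
	mq->bounce_sg_len = sg_len;

	/* ... sum their lengths (assumed loop body) ... */
	for_each_sg(mq->bounce_sg, sg, sg_len, i)		/* line 323 */
		buflen += sg->length;

	/* ... and present the caller with one segment: the bounce buffer. */
	sg_init_one(mq->sg, mq->bounce_buf, buflen);		/* line 326 */

	return 1;
}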
335 void mmc_queue_bounce_pre(struct mmc_queue *mq) in mmc_queue_bounce_pre() argument
339 if (!mq->bounce_buf) in mmc_queue_bounce_pre()
342 if (rq_data_dir(mq->req) != WRITE) in mmc_queue_bounce_pre()
346 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, in mmc_queue_bounce_pre()
347 mq->bounce_buf, mq->sg[0].length); in mmc_queue_bounce_pre()
355 void mmc_queue_bounce_post(struct mmc_queue *mq) in mmc_queue_bounce_post() argument
359 if (!mq->bounce_buf) in mmc_queue_bounce_post()
362 if (rq_data_dir(mq->req) != READ) in mmc_queue_bounce_post()
366 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, in mmc_queue_bounce_post()
367 mq->bounce_buf, mq->sg[0].length); in mmc_queue_bounce_post()
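
mmc_queue_bounce_pre() copies write data into the bounce buffer before the transfer and mmc_queue_bounce_post() copies read data back out afterwards; both are no-ops when no bounce buffer was allocated. A hypothetical caller sketch showing the intended sequence (the real issue path lives in the block driver, outside this listing):

/* hypothetical helper, not part of the listing above */
static void issue_one_request(struct mmc_queue *mq, struct mmc_data *data)
{
	data->sg = mq->sg;
	data->sg_len = mmc_queue_map_sg(mq);	/* one entry when bouncing */

	mmc_queue_bounce_pre(mq);	/* WRITE: gather pages into bounce_buf */
	/* ... start the transfer on the host controller here ... */
	mmc_queue_bounce_post(mq);	/* READ: scatter bounce_buf back to pages */
}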