Lines matching refs:bdev (references to struct scm_blk_dev *bdev in drivers/s390/block/scm_blk.c)
137 static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) in scm_permit_request() argument
139 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; in scm_permit_request()
144 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_prepare() local
145 struct scm_device *scmdev = bdev->gendisk->private_data; in scm_request_prepare()
168 static inline void scm_request_init(struct scm_blk_dev *bdev, in scm_request_init() argument
177 aobrq->scmdev = bdev->scmdev; in scm_request_init()
181 scmrq->bdev = bdev; in scm_request_init()
187 static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) in scm_ensure_queue_restart() argument
189 if (atomic_read(&bdev->queued_reqs)) { in scm_ensure_queue_restart()
193 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); in scm_ensure_queue_restart()
198 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_requeue() local
201 blk_requeue_request(bdev->rq, scmrq->request); in scm_request_requeue()
202 atomic_dec(&bdev->queued_reqs); in scm_request_requeue()
204 scm_ensure_queue_restart(bdev); in scm_request_requeue()
209 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_finish() local
213 atomic_dec(&bdev->queued_reqs); in scm_request_finish()
220 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); in scm_blk_request() local
229 if (!scm_permit_request(bdev, req)) { in scm_blk_request()
230 scm_ensure_queue_restart(bdev); in scm_blk_request()
236 scm_ensure_queue_restart(bdev); in scm_blk_request()
239 scm_request_init(bdev, scmrq, req); in scm_blk_request()
246 atomic_inc(&bdev->queued_reqs); in scm_blk_request()
252 atomic_inc(&bdev->queued_reqs); in scm_blk_request()
284 struct scm_blk_dev *bdev = scmrq->bdev; in scm_blk_irq() local
290 spin_lock(&bdev->lock); in scm_blk_irq()
291 list_add_tail(&scmrq->list, &bdev->finished_requests); in scm_blk_irq()
292 spin_unlock(&bdev->lock); in scm_blk_irq()
293 tasklet_hi_schedule(&bdev->tasklet); in scm_blk_irq()
298 struct scm_blk_dev *bdev = scmrq->bdev; in scm_blk_handle_error() local
307 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_handle_error()
308 if (bdev->state != SCM_WR_PROHIBIT) in scm_blk_handle_error()
310 (unsigned long) bdev->scmdev->address); in scm_blk_handle_error()
311 bdev->state = SCM_WR_PROHIBIT; in scm_blk_handle_error()
312 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_handle_error()
323 spin_lock_irqsave(&bdev->rq_lock, flags); in scm_blk_handle_error()
325 spin_unlock_irqrestore(&bdev->rq_lock, flags); in scm_blk_handle_error()
328 static void scm_blk_tasklet(struct scm_blk_dev *bdev) in scm_blk_tasklet() argument
333 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
334 while (!list_empty(&bdev->finished_requests)) { in scm_blk_tasklet()
335 scmrq = list_first_entry(&bdev->finished_requests, in scm_blk_tasklet()
338 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_tasklet()
344 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
350 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
355 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
357 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_tasklet()
359 blk_run_queue(bdev->rq); in scm_blk_tasklet()
362 int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) in scm_blk_dev_setup() argument
375 bdev->scmdev = scmdev; in scm_blk_dev_setup()
376 bdev->state = SCM_OPER; in scm_blk_dev_setup()
377 spin_lock_init(&bdev->rq_lock); in scm_blk_dev_setup()
378 spin_lock_init(&bdev->lock); in scm_blk_dev_setup()
379 INIT_LIST_HEAD(&bdev->finished_requests); in scm_blk_dev_setup()
380 atomic_set(&bdev->queued_reqs, 0); in scm_blk_dev_setup()
381 tasklet_init(&bdev->tasklet, in scm_blk_dev_setup()
383 (unsigned long) bdev); in scm_blk_dev_setup()
385 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); in scm_blk_dev_setup()
389 bdev->rq = rq; in scm_blk_dev_setup()
398 scm_blk_dev_cluster_setup(bdev); in scm_blk_dev_setup()
400 bdev->gendisk = alloc_disk(SCM_NR_PARTS); in scm_blk_dev_setup()
401 if (!bdev->gendisk) in scm_blk_dev_setup()
405 bdev->gendisk->driverfs_dev = &scmdev->dev; in scm_blk_dev_setup()
406 bdev->gendisk->private_data = scmdev; in scm_blk_dev_setup()
407 bdev->gendisk->fops = &scm_blk_devops; in scm_blk_dev_setup()
408 bdev->gendisk->queue = rq; in scm_blk_dev_setup()
409 bdev->gendisk->major = scm_major; in scm_blk_dev_setup()
410 bdev->gendisk->first_minor = devindex * SCM_NR_PARTS; in scm_blk_dev_setup()
412 len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm"); in scm_blk_dev_setup()
414 len += snprintf(bdev->gendisk->disk_name + len, in scm_blk_dev_setup()
419 snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c", in scm_blk_dev_setup()
423 set_capacity(bdev->gendisk, scmdev->size >> 9); in scm_blk_dev_setup()
424 add_disk(bdev->gendisk); in scm_blk_dev_setup()
434 void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) in scm_blk_dev_cleanup() argument
436 tasklet_kill(&bdev->tasklet); in scm_blk_dev_cleanup()
437 del_gendisk(bdev->gendisk); in scm_blk_dev_cleanup()
438 blk_cleanup_queue(bdev->gendisk->queue); in scm_blk_dev_cleanup()
439 put_disk(bdev->gendisk); in scm_blk_dev_cleanup()
442 void scm_blk_set_available(struct scm_blk_dev *bdev) in scm_blk_set_available() argument
446 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_set_available()
447 if (bdev->state == SCM_WR_PROHIBIT) in scm_blk_set_available()
449 (unsigned long) bdev->scmdev->address); in scm_blk_set_available()
450 bdev->state = SCM_OPER; in scm_blk_set_available()
451 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_set_available()
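The references above center on one availability gate: scm_blk_handle_error() moves bdev->state to SCM_WR_PROHIBIT under bdev->lock, scm_blk_set_available() moves it back to SCM_OPER, and scm_permit_request() refuses only writes while the state is SCM_WR_PROHIBIT. As a rough illustration of that gate only (not driver code), the user-space C sketch below stands a pthread mutex in for bdev->lock; the names permit_request, prohibit_writes, set_available and struct blk_dev are made up for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum scm_state { SCM_OPER, SCM_WR_PROHIBIT };

/* Illustrative stand-in for the struct scm_blk_dev fields used above. */
struct blk_dev {
	pthread_mutex_t lock;   /* plays the role of bdev->lock */
	enum scm_state state;   /* plays the role of bdev->state */
};

/* Same check as scm_permit_request(): reads always pass, writes only while SCM_OPER.
 * Like the listed line, the state is read without taking the lock. */
static bool permit_request(struct blk_dev *bdev, bool is_write)
{
	return !is_write || bdev->state != SCM_WR_PROHIBIT;
}

/* State change corresponding to the one in scm_blk_handle_error(). */
static void prohibit_writes(struct blk_dev *bdev)
{
	pthread_mutex_lock(&bdev->lock);
	bdev->state = SCM_WR_PROHIBIT;
	pthread_mutex_unlock(&bdev->lock);
}

/* State change corresponding to scm_blk_set_available(). */
static void set_available(struct blk_dev *bdev)
{
	pthread_mutex_lock(&bdev->lock);
	bdev->state = SCM_OPER;
	pthread_mutex_unlock(&bdev->lock);
}

int main(void)
{
	struct blk_dev bdev = { PTHREAD_MUTEX_INITIALIZER, SCM_OPER };

	prohibit_writes(&bdev);
	printf("write permitted while prohibited: %d\n", permit_request(&bdev, true));
	printf("read permitted while prohibited:  %d\n", permit_request(&bdev, false));
	set_available(&bdev);
	printf("write permitted after restore:    %d\n", permit_request(&bdev, true));
	return 0;
}

Built with "cc -pthread", the sketch prints 0, 1, 1: only writes are refused while write access is prohibited, matching the behaviour implied by the listed lines.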