Lines Matching refs:bd
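
(Cross-reference listing: each line below is a use of the struct bsg_device pointer bd in the bsg driver. The leading number is the line number within the source file; the trailing note names the enclosing function, or marks the line as a member, local, or argument declaration.)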

81 struct bsg_device *bd;  member
92 struct bsg_device *bd = bc->bd; in bsg_free_command() local
97 spin_lock_irqsave(&bd->lock, flags); in bsg_free_command()
98 bd->queued_cmds--; in bsg_free_command()
99 spin_unlock_irqrestore(&bd->lock, flags); in bsg_free_command()
101 wake_up(&bd->wq_free); in bsg_free_command()
104 static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) in bsg_alloc_command() argument
108 spin_lock_irq(&bd->lock); in bsg_alloc_command()
110 if (bd->queued_cmds >= bd->max_queue) in bsg_alloc_command()
113 bd->queued_cmds++; in bsg_alloc_command()
114 spin_unlock_irq(&bd->lock); in bsg_alloc_command()
118 spin_lock_irq(&bd->lock); in bsg_alloc_command()
119 bd->queued_cmds--; in bsg_alloc_command()
124 bc->bd = bd; in bsg_alloc_command()
126 dprintk("%s: returning free cmd %p\n", bd->name, bc); in bsg_alloc_command()
129 spin_unlock_irq(&bd->lock); in bsg_alloc_command()
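
Taken together, bsg_free_command() and bsg_alloc_command() form an admission gate: allocation is refused once queued_cmds reaches max_queue, and every free drops the count under bd->lock and then wakes wq_free so waiters learn a slot opened up. A minimal userspace sketch of the same pattern, with a pthread mutex and condition variable standing in for the kernel's spinlock and wait queue (all names here are illustrative, not kernel API):

    #include <pthread.h>

    struct gate {
        pthread_mutex_t lock;     /* plays the role of bd->lock */
        pthread_cond_t wq_free;   /* plays the role of bd->wq_free */
        int queued_cmds;
        int max_queue;
    };

    /* Like bsg_alloc_command(): refuse (the kernel returns -EAGAIN)
     * once the queue is full, otherwise claim a slot. */
    static int gate_enter(struct gate *g)
    {
        int ret = -1;

        pthread_mutex_lock(&g->lock);
        if (g->queued_cmds < g->max_queue) {
            g->queued_cmds++;
            ret = 0;
        }
        pthread_mutex_unlock(&g->lock);
        return ret;
    }

    /* Like bsg_free_command(): release the slot under the lock, then
     * wake a waiter so it can notice the free slot. */
    static void gate_leave(struct gate *g)
    {
        pthread_mutex_lock(&g->lock);
        g->queued_cmds--;
        pthread_mutex_unlock(&g->lock);
        pthread_cond_signal(&g->wq_free);
    }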
139 struct sg_io_v4 *hdr, struct bsg_device *bd, in blk_fill_sgv4_hdr_rq() argument
209 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm) in bsg_map_hdr() argument
211 struct request_queue *q = bd->queue; in bsg_map_hdr()
240 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); in bsg_map_hdr()
299 struct bsg_device *bd = bc->bd; in bsg_rq_end_io() local
303 bd->name, rq, bc, bc->bio); in bsg_rq_end_io()
307 spin_lock_irqsave(&bd->lock, flags); in bsg_rq_end_io()
308 list_move_tail(&bc->list, &bd->done_list); in bsg_rq_end_io()
309 bd->done_cmds++; in bsg_rq_end_io()
310 spin_unlock_irqrestore(&bd->lock, flags); in bsg_rq_end_io()
312 wake_up(&bd->wq_done); in bsg_rq_end_io()
319 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, in bsg_add_command() argument
332 spin_lock_irq(&bd->lock); in bsg_add_command()
333 list_add_tail(&bc->list, &bd->busy_list); in bsg_add_command()
334 spin_unlock_irq(&bd->lock); in bsg_add_command()
336 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); in bsg_add_command()
342 static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) in bsg_next_done_cmd() argument
346 spin_lock_irq(&bd->lock); in bsg_next_done_cmd()
347 if (bd->done_cmds) { in bsg_next_done_cmd()
348 bc = list_first_entry(&bd->done_list, struct bsg_command, list); in bsg_next_done_cmd()
350 bd->done_cmds--; in bsg_next_done_cmd()
352 spin_unlock_irq(&bd->lock); in bsg_next_done_cmd()
360 static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) in bsg_get_done_cmd() argument
366 bc = bsg_next_done_cmd(bd); in bsg_get_done_cmd()
370 if (!test_bit(BSG_F_BLOCK, &bd->flags)) { in bsg_get_done_cmd()
375 ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); in bsg_get_done_cmd()
382 dprintk("%s: returning done %p\n", bd->name, bc); in bsg_get_done_cmd()
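
The groups above trace a command's life: bsg_add_command() parks it on busy_list; when the request finishes, bsg_rq_end_io() moves it to done_list, bumps done_cmds, and wakes wq_done; bsg_get_done_cmd() then pops a completion, failing immediately (the kernel uses -EAGAIN) unless BSG_F_BLOCK is set, in which case it sleeps in wait_event_interruptible(). A hedged userspace sketch of that handoff, again with pthread primitives in place of the spinlock and wait queue:

    #include <pthread.h>
    #include <stdbool.h>

    struct done_queue {
        pthread_mutex_t lock;     /* plays the role of bd->lock */
        pthread_cond_t wq_done;   /* plays the role of bd->wq_done */
        int done_cmds;
        bool blocking;            /* plays the role of BSG_F_BLOCK */
    };

    /* Like bsg_get_done_cmd(): take one completion, or fail at once
     * in non-blocking mode. */
    static int get_done(struct done_queue *q)
    {
        pthread_mutex_lock(&q->lock);
        while (q->done_cmds == 0) {
            if (!q->blocking) {
                pthread_mutex_unlock(&q->lock);
                return -1;  /* kernel: -EAGAIN */
            }
            /* kernel: wait_event_interruptible(bd->wq_done, ...) */
            pthread_cond_wait(&q->wq_done, &q->lock);
        }
        q->done_cmds--;
        pthread_mutex_unlock(&q->lock);
        return 0;
    }

    /* Like bsg_rq_end_io(): record one completion, then wake readers. */
    static void complete_one(struct done_queue *q)
    {
        pthread_mutex_lock(&q->lock);
        q->done_cmds++;
        pthread_mutex_unlock(&q->lock);
        pthread_cond_signal(&q->wq_done);
    }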
443 static bool bsg_complete(struct bsg_device *bd) in bsg_complete() argument
449 spin_lock_irq(&bd->lock); in bsg_complete()
451 BUG_ON(bd->done_cmds > bd->queued_cmds); in bsg_complete()
456 if (bd->done_cmds == bd->queued_cmds) in bsg_complete()
459 spin = !test_bit(BSG_F_BLOCK, &bd->flags); in bsg_complete()
461 spin_unlock_irq(&bd->lock); in bsg_complete()
467 static int bsg_complete_all_commands(struct bsg_device *bd) in bsg_complete_all_commands() argument
472 dprintk("%s: entered\n", bd->name); in bsg_complete_all_commands()
477 io_wait_event(bd->wq_done, bsg_complete(bd)); in bsg_complete_all_commands()
484 spin_lock_irq(&bd->lock); in bsg_complete_all_commands()
485 if (!bd->queued_cmds) { in bsg_complete_all_commands()
486 spin_unlock_irq(&bd->lock); in bsg_complete_all_commands()
489 spin_unlock_irq(&bd->lock); in bsg_complete_all_commands()
491 bc = bsg_get_done_cmd(bd); in bsg_complete_all_commands()
507 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, in __bsg_read() argument
519 bc = bsg_get_done_cmd(bd); in __bsg_read()
549 static inline void bsg_set_block(struct bsg_device *bd, struct file *file) in bsg_set_block() argument
552 clear_bit(BSG_F_BLOCK, &bd->flags); in bsg_set_block()
554 set_bit(BSG_F_BLOCK, &bd->flags); in bsg_set_block()
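
bsg_set_block() simply latches the file's O_NONBLOCK state into BSG_F_BLOCK on each read/write entry, so blocking behaviour is chosen the usual way by the opener. Sketch of the userspace side (the /dev/bsg path is a placeholder):

    #include <fcntl.h>

    int main(void)
    {
        /* Blocking: read() sleeps on wq_done until a command completes. */
        int fd = open("/dev/bsg/0:0:0:0", O_RDWR);

        /* Non-blocking: read()/write() return -EAGAIN instead of
         * sleeping; can also be toggled later via fcntl(F_SETFL). */
        int nb = open("/dev/bsg/0:0:0:0", O_RDWR | O_NONBLOCK);

        return (fd < 0) || (nb < 0);
    }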
571 struct bsg_device *bd = file->private_data; in bsg_read() local
575 dprintk("%s: read %zd bytes\n", bd->name, count); in bsg_read()
577 bsg_set_block(bd, file); in bsg_read()
580 ret = __bsg_read(buf, count, bd, NULL, &bytes_read); in bsg_read()
589 static int __bsg_write(struct bsg_device *bd, const char __user *buf, in __bsg_write() argument
605 struct request_queue *q = bd->queue; in __bsg_write()
607 bc = bsg_alloc_command(bd); in __bsg_write()
622 rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm); in __bsg_write()
629 bsg_add_command(bd, q, bc, rq); in __bsg_write()
646 struct bsg_device *bd = file->private_data; in bsg_write() local
650 dprintk("%s: write %zd bytes\n", bd->name, count); in bsg_write()
655 bsg_set_block(bd, file); in bsg_write()
658 ret = __bsg_write(bd, buf, count, &bytes_written, in bsg_write()
669 dprintk("%s: returning %zd\n", bd->name, bytes_written); in bsg_write()
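
__bsg_write() and __bsg_read() implement bsg's asynchronous interface: for each whole struct sg_io_v4 the user writes, a command slot is allocated, the header is mapped onto a request by bsg_map_hdr(), and the command is queued with bsg_add_command(); completed headers are later handed back through read(). A hedged sketch of a userspace caller (device path and CDB are placeholders; error handling trimmed):

    #include <fcntl.h>
    #include <linux/bsg.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char cdb[6] = { 0 };          /* TEST UNIT READY */
        unsigned char sense[32];
        struct sg_io_v4 hdr;

        memset(&hdr, 0, sizeof(hdr));
        hdr.guard = 'Q';                       /* bsg rejects anything else */
        hdr.protocol = BSG_PROTOCOL_SCSI;
        hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
        hdr.request = (uintptr_t)cdb;
        hdr.request_len = sizeof(cdb);
        hdr.response = (uintptr_t)sense;
        hdr.max_response_len = sizeof(sense);

        int fd = open("/dev/bsg/0:0:0:0", O_RDWR);   /* placeholder path */
        if (fd < 0)
            return 1;

        /* Queue the command: lands in __bsg_write(). */
        if (write(fd, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr))
            return 1;

        /* Reap the completion: lands in __bsg_read() once the rq ends. */
        if (read(fd, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr))
            return 1;

        return hdr.device_status != 0;
    }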
675 struct bsg_device *bd; in bsg_alloc_device() local
677 bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); in bsg_alloc_device()
678 if (unlikely(!bd)) in bsg_alloc_device()
681 spin_lock_init(&bd->lock); in bsg_alloc_device()
683 bd->max_queue = BSG_DEFAULT_CMDS; in bsg_alloc_device()
685 INIT_LIST_HEAD(&bd->busy_list); in bsg_alloc_device()
686 INIT_LIST_HEAD(&bd->done_list); in bsg_alloc_device()
687 INIT_HLIST_NODE(&bd->dev_list); in bsg_alloc_device()
689 init_waitqueue_head(&bd->wq_free); in bsg_alloc_device()
690 init_waitqueue_head(&bd->wq_done); in bsg_alloc_device()
691 return bd; in bsg_alloc_device()
706 static int bsg_put_device(struct bsg_device *bd) in bsg_put_device() argument
709 struct request_queue *q = bd->queue; in bsg_put_device()
713 do_free = atomic_dec_and_test(&bd->ref_count); in bsg_put_device()
719 hlist_del(&bd->dev_list); in bsg_put_device()
722 dprintk("%s: tearing down\n", bd->name); in bsg_put_device()
727 set_bit(BSG_F_BLOCK, &bd->flags); in bsg_put_device()
734 ret = bsg_complete_all_commands(bd); in bsg_put_device()
736 kfree(bd); in bsg_put_device()
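
bsg_put_device() is a standard last-reference teardown: atomic_dec_and_test() elects the freeing thread, the device is unhashed from dev_list, BSG_F_BLOCK is forced on so the final drain cannot bail out with -EAGAIN, and bsg_complete_all_commands() reaps every outstanding command before kfree(). The shape of the pattern in C11 atomics (illustrative only, not kernel code):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct dev {
        atomic_int ref_count;
        /* ... lists, wait state ... */
    };

    static void put_dev(struct dev *d)
    {
        /* atomic_dec_and_test(): only the thread that drops the count
         * to zero performs the teardown and the free. */
        if (atomic_fetch_sub(&d->ref_count, 1) == 1) {
            /* kernel: unhash, force BSG_F_BLOCK, drain all commands */
            free(d);
        }
    }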
748 struct bsg_device *bd; in bsg_add_device() local
761 bd = bsg_alloc_device(); in bsg_add_device()
762 if (!bd) { in bsg_add_device()
767 bd->queue = rq; in bsg_add_device()
769 bsg_set_block(bd, file); in bsg_add_device()
771 atomic_set(&bd->ref_count, 1); in bsg_add_device()
773 hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); in bsg_add_device()
775 strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); in bsg_add_device()
777 format_dev_t(buf, inode->i_rdev), bd->max_queue); in bsg_add_device()
780 return bd; in bsg_add_device()
785 struct bsg_device *bd; in __bsg_get_device() local
789 hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { in __bsg_get_device()
790 if (bd->queue == q) { in __bsg_get_device()
791 atomic_inc(&bd->ref_count); in __bsg_get_device()
795 bd = NULL; in __bsg_get_device()
798 return bd; in __bsg_get_device()
803 struct bsg_device *bd; in bsg_get_device() local
818 bd = __bsg_get_device(iminor(inode), bcd->queue); in bsg_get_device()
819 if (bd) in bsg_get_device()
820 return bd; in bsg_get_device()
822 bd = bsg_add_device(inode, bcd->queue, file); in bsg_get_device()
823 if (IS_ERR(bd)) in bsg_get_device()
826 return bd; in bsg_get_device()
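
bsg_get_device() is a classic lookup-or-create open path: __bsg_get_device() scans the minor-indexed hash for an existing device on the same queue and takes a reference, and only on a miss does bsg_add_device() allocate, initialise, and hash a fresh one. A simplified sketch of that shape (single plain list instead of the kernel's hlist hash, no locking shown, names illustrative):

    #include <stdlib.h>

    struct dev {
        int minor;
        int refs;
        struct dev *next;
    };

    static struct dev *dev_list;   /* stands in for the minor hash */

    static struct dev *get_dev(int minor)
    {
        struct dev *d;

        /* __bsg_get_device(): reuse and take a reference. */
        for (d = dev_list; d; d = d->next) {
            if (d->minor == minor) {
                d->refs++;                 /* atomic_inc(&bd->ref_count) */
                return d;
            }
        }

        /* bsg_add_device(): allocate, initialise, publish. */
        d = calloc(1, sizeof(*d));         /* bsg_alloc_device() */
        if (!d)
            return NULL;
        d->minor = minor;
        d->refs = 1;                       /* atomic_set(&bd->ref_count, 1) */
        d->next = dev_list;                /* hlist_add_head() */
        dev_list = d;
        return d;
    }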
831 struct bsg_device *bd; in bsg_open() local
833 bd = bsg_get_device(inode, file); in bsg_open()
835 if (IS_ERR(bd)) in bsg_open()
836 return PTR_ERR(bd); in bsg_open()
838 file->private_data = bd; in bsg_open()
844 struct bsg_device *bd = file->private_data; in bsg_release() local
847 return bsg_put_device(bd); in bsg_release()
852 struct bsg_device *bd = file->private_data; in bsg_poll() local
855 poll_wait(file, &bd->wq_done, wait); in bsg_poll()
856 poll_wait(file, &bd->wq_free, wait); in bsg_poll()
858 spin_lock_irq(&bd->lock); in bsg_poll()
859 if (!list_empty(&bd->done_list)) in bsg_poll()
861 if (bd->queued_cmds < bd->max_queue) in bsg_poll()
863 spin_unlock_irq(&bd->lock); in bsg_poll()
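
bsg_poll() maps the two counters straight onto poll semantics: readable while done_list is non-empty, writable while queued_cmds < max_queue. That lets a caller drive the asynchronous write()/read() interface from an event loop, roughly like this:

    #include <poll.h>

    static void wait_for_bsg(int fd)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

        if (poll(&pfd, 1, -1) > 0) {
            if (pfd.revents & POLLIN) {
                /* done_list non-empty: a completed sg_io_v4 can be read() */
            }
            if (pfd.revents & POLLOUT) {
                /* queued_cmds < max_queue: another header can be write()n */
            }
        }
    }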
870 struct bsg_device *bd = file->private_data; in bsg_ioctl() local
879 return put_user(bd->max_queue, uarg); in bsg_ioctl()
888 spin_lock_irq(&bd->lock); in bsg_ioctl()
889 bd->max_queue = queue; in bsg_ioctl()
890 spin_unlock_irq(&bd->lock); in bsg_ioctl()
907 return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); in bsg_ioctl()
918 rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE); in bsg_ioctl()
927 blk_execute_rq(bd->queue, NULL, rq, at_head); in bsg_ioctl()
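
The ioctl path covers the synchronous case: SG_GET_COMMAND_Q and SG_SET_COMMAND_Q read and write bd->max_queue under the lock, and SG_IO maps a single sg_io_v4 via bsg_map_hdr() and runs it to completion with blk_execute_rq(), bypassing the queueing machinery entirely. A hedged sketch of both (placeholder device path, reusing the header setup from the write() example above):

    #include <fcntl.h>
    #include <linux/bsg.h>
    #include <scsi/sg.h>
    #include <string.h>
    #include <sys/ioctl.h>

    int main(void)
    {
        int fd = open("/dev/bsg/0:0:0:0", O_RDWR);   /* placeholder path */
        if (fd < 0)
            return 1;

        /* Queue depth: mirrors put_user(bd->max_queue, ...) and
         * bd->max_queue = queue in bsg_ioctl(). */
        int depth = 0;
        ioctl(fd, SG_GET_COMMAND_Q, &depth);
        depth = 16;
        ioctl(fd, SG_SET_COMMAND_Q, &depth);

        struct sg_io_v4 hdr;
        memset(&hdr, 0, sizeof(hdr));
        hdr.guard = 'Q';
        hdr.protocol = BSG_PROTOCOL_SCSI;
        hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
        /* ... request/response buffers as in the write() example ... */

        /* Synchronous: bsg_map_hdr() + blk_execute_rq(), no queueing. */
        return ioctl(fd, SG_IO, &hdr) < 0;
    }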